
CLI documentation

Module containing the state dataclass and the singleton in charge of supporting window transitions in the CLI.

PPStateData dataclass

Data class used to keep track of the CLI state.

Source code in CLI/ProcessProphet.py
@dataclass
class PPStateData: 
    """
    data class used for state conservation. 
    """
    projects_path: str # path where all projects are saved

    current_project: str|None # current project name

    model_trained: bool # whether a model has been trained

    predictive_df_generated: bool # whether the predictive event log has been generated
    petri_net_generated: bool # whether a petri net has been generated


    #: the following paths are subfolders of the current project. they are set for convenience. 
    input_logs_path: str|None
    models_path: str|None
    petri_nets_path: str|None
    predictive_logs_path:str|None
    partial_traces_path: str|None
    multiple_predictions_path: str|None
    mode: ProcessProphetMode | None
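
For orientation, this is the state that the ProcessProphet singleton (below) builds on startup, written with keyword arguments for readability; a minimal sketch mirroring the positional call in ProcessProphet.__init__:

# minimal sketch: the initial state created by ProcessProphet.__init__, spelled
# out with keyword arguments instead of the positional call used in the source
initial_state = PPStateData(
    projects_path="projects",
    current_project=None,
    model_trained=False,
    predictive_df_generated=False,
    petri_net_generated=False,
    input_logs_path=None,
    models_path=None,
    petri_nets_path=None,
    predictive_logs_path=None,
    partial_traces_path=None,
    multiple_predictions_path=None,
    mode=None,
)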

ProcessProphet

This class is intended for window management and works as a singleton. The other ProcessProphet classes (such as ProcessProphetStart) are always provided with the same instance of this class and essentially determine the content of self.current_window.

There can only be one instance of this class, as there is only one terminal to draw in; therefore this class is a singleton.

Source code in CLI/ProcessProphet.py
class ProcessProphet(metaclass = SingletonMeta):
    """
    this class is intended for window management. 
    this class works as a singleton. the other ProcessProphet classes (such as ProcessProphetStart)
    will be always provided with the same instance of this class and will basically determine 
    the content of `self.current_window`. 

    there can only be one instance of this class, as there is only one terminal to draw in. therefore this 
    class is a singleton.
    """
    def __init__(self):
        self.state = PPStateData("projects", None, False, False, False, None, None, None,None,None,None, None) 

        #: window manager object from pytermgui. this object handles 
        #: window lifecycle.  windows have nice properties such as
        #: being resizable. 
        self.manager = ptg.WindowManager() 

        #: the current window's content
        self.current_window = None 

        #: variable used for styling
        self.button_color = "[black]"

        #: use 80% of the window width
        self.window_width = self.calc_term_size()
        self.window_height = 50

    def calc_term_size(self):
        return int(os.get_terminal_size(0)[0]*0.8)



    def set_current_window(self, window): 
        """
        Sets the current window.

        Args:
            window (object): The new window.
        """
        self.current_window = window 
    def remove_current_window(self): 
        """
        Removes the current window.

        Args:
            window (object): The new window.
        """
        self.manager.remove(self.current_window)

    def switch_window(self, new_window):
        """
        In charge of switching windows.

        Args:
            new_window (object): The new window.
        """
        with ptg.YamlLoader() as loader: #: loads the styles from `styles.yaml`
            loader.load(CONFIG)
        if self.current_window !=None: #for not initialized case
            self.remove_current_window()

        #: changes the window
        self.set_current_window(new_window)
        self.manager.add(new_window)
        self.manager.focus(new_window)

    def run(self):
        with self.manager:
            #: run the app.
            self.manager.run()
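
The singleton is created once and then driven by the other CLI classes. A minimal sketch of a plausible entry point (the actual startup script is not shown on this page, so treat the wiring as an assumption):

# sketch of a plausible entry point; the real main script may differ
pp = ProcessProphet()           # first call creates the singleton
assert pp is ProcessProphet()   # later calls return the same instance (SingletonMeta)
ProcessProphetStart(pp)         # builds the main menu and calls pp.switch_window(...)
pp.run()                        # starts the pytermgui window manager loop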

remove_current_window()

Removes the current window from the window manager.
Source code in CLI/ProcessProphet.py
def remove_current_window(self): 
    """
    Removes the current window.

    Args:
        window (object): The new window.
    """
    self.manager.remove(self.current_window)

set_current_window(window)

Sets the current window.

Parameters:

    window (object): The new window. Required.
Source code in CLI/ProcessProphet.py
def set_current_window(self, window): 
    """
    Sets the current window.

    Args:
        window (object): The new window.
    """
    self.current_window = window 

switch_window(new_window)

In charge of switching windows.

Parameters:

    new_window (object): The new window. Required.
Source code in CLI/ProcessProphet.py
def switch_window(self, new_window):
    """
    In charge of switching windows.

    Args:
        new_window (object): The new window.
    """
    with ptg.YamlLoader() as loader: #: loads the styles from `styles.yaml`
        loader.load(CONFIG)
    if self.current_window !=None: #for not initialized case
        self.remove_current_window()

    #: changes the window
    self.set_current_window(new_window)
    self.manager.add(new_window)
    self.manager.focus(new_window)
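
Every screen on this page follows the same pattern when handing a new window to the singleton. A minimal sketch, assuming `pp` is the ProcessProphet instance; the labels and callback are only illustrative:

# minimal sketch of the switch_window pattern used throughout the CLI
window = ptg.Window(
    ptg.Label("Choose one option:"),
    "",
    ptg.Button("continue", lambda *_: None),  # placeholder callback
    box="DOUBLE",
)
window.center()
pp.switch_window(window)  # removes the old window, then adds and focuses the new one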

SingletonMeta

Bases: type

Singleton metaclass taken from https://refactoring.guru/design-patterns/singleton/python/examples

Source code in CLI/ProcessProphet.py
class SingletonMeta(type):
    """
    singleton metaclass taken from `https://refactoring.guru/design-patterns/singleton/python/examples`
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            instance = super().__call__(*args, **kwargs)
            cls._instances[cls] = instance
        return cls._instances[cls]
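
A quick illustration of what the metaclass guarantees; the class name below is made up for the example:

# illustrative only: any class created with SingletonMeta yields exactly one instance
class TerminalOwner(metaclass=SingletonMeta):
    pass

a = TerminalOwner()
b = TerminalOwner()
assert a is b  # both names refer to the same object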

This process is in charge of project creation/selection, user mode selection and then action selection (training, preprocessing, prediction generation, ...)

ProcessProphetStart

This class defines the windows for the initial part of the program, i.e.:
  • project creation
  • project selection
  • user mode selection

It also sets the pp.state path variables once a project has been created/selected.

Source code in CLI/ProcessProphetStart.py
class ProcessProphetStart: 
    """
    This class defines the windows for the initial part of the program, i.e.:
    - project creation
    - project selection
    - user mode selection
    It also sets the `pp.state` path variables once a project has been created/selected.
    """
    def __init__(self, pp, start:bool= True):
        """
        Initialize ProcessProphet Object and main menu.

        Args:
            pp (ProcessProphet): The ProcessProphet instance in charge of window management.
            start (bool, optional): If set to True, we start at the very beginning, i.e., project selection/creation. 
                        Otherwise, we go straight into the manager selection. Defaults to True.
        """
        self.pp = pp
        if start: 
            self.pp.switch_window(self.main_menu())
        else: 
            self.pp.switch_window(self.select_manager())


    def launch_preprocessor(self):
        """
        launches the Preprocessing CLI interface. 
        the constructor calls the window change
        """
        preprocessor= ProcessProphetPreprocessing(self.pp)

    def launch_trainer(self):
        """
        launches the Training CLI interface 
        the constructor calls the window change
        """
        trainer = ProcessProphetTrain(self.pp)

    def launch_predictor(self):
        """
        launches the Predictor CLI interface 
        the constructor calls the window change
        """
        predictor = ProcessProphetPredict(self.pp)
    def launch_conformance(self):
        """
        launches the Conformance checking CLI interface 
        the constructor calls the window change
        """
        conformance_checker = ProcessProphetModel(self.pp)


    def select_manager(self) : 
        """
        after selecting the project and user mode, the user picks one of the managers in ProcessProphet 
        (preprocessing, training, prediction generation and conformance checking)
        """
        container = [
            ptg.Label(f"select one of the following actions:"),
            "",
            ptg.Button(f"{self.pp.button_color}import and filter log", lambda *_: self.launch_preprocessor()), 
            "",
            ptg.Button(f"{self.pp.button_color}train neural network", lambda *_: self.launch_trainer()), 
            "",
            ptg.Button(f"{self.pp.button_color}make predictions", lambda *_: self.launch_predictor()), 
            "",
            ptg.Button(f"{self.pp.button_color}conformance checking", lambda *_:self.launch_conformance()), 
            "",
            ptg.Button(f"{self.pp.button_color}back to menu", lambda *_: self.pp.switch_window(self.main_menu())), 
        ] 

        window = ptg.Window(*container, box="DOUBLE")
        window.center()
        return window

    def notify_project_creation(self, message, success):
        """
        function used to indicate that the new project name is valid
        as a result the window is switched to the menu for selecting the mode the current project is going to run in
        """ 
        if success: 
            container =ptg.Container( 
                ptg.Label(f"{message}"),
                "",
                ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.select_mode())), 
                "",
                ptg.Button(f"{self.pp.button_color}Exit", lambda *_: self.pp.manager.stop())
            )
        else: 
            container = ptg.Container( 
                ptg.Label(f"{message}!"),
                "",
                ptg.Button(f"{self.pp.button_color}back to menu", lambda *_: self.pp.switch_window(self.main_menu()))
            )


        window = ptg.Window(container, box="DOUBLE")
        window.center()
        return window 

    def handle_project_name_input(self):
        """
        Exception if a new project is created with a name that is already used for another project in the projects directory.
        The user can return to the previous menu to create a new project with a different name.

        If there is a valid input for the new project (unique name), then all of the necessary subdirectories are created where the 
        files needed for the different functionalities of the application are stored. For example, a subdirectory for the input log on which
        the RNN can then be trained.
        The user can then continue and select the mode in which they want to work in the new project.

        At the same time, the state is updated (see `ProcessProphetState`).

        We use the following file structure: 
            - `projects/`: Contains all projects.
            - `projects/dummy_project/`: Contains all important subfolders for `dummy_project`.
            - `projects/dummy_project/input_logs`: All input logs used for `dummy_project` should be stored in this folder.
            - `projects/dummy_project/models`: All models used for `dummy_project` are generated in this folder.
            - `projects/dummy_project/petri_nets`: All petri nets used for `dummy_project` are stored here.
            - `projects/dummy_project/predictive_logs`: All generated predictive logs used for `dummy_project` and conformance checking are stored here.
            - `projects/dummy_project/partial_traces`: All input partial traces given by the user are searched inside this folder.  
            - `projects/dummy_project/multiple_predictions_path`: All predictions created using the multiple predictions function are stored here (for the `dummy_project` project).
        """
        name = self.project_name_input.value
        message = ""
        if not os.path.isdir(self.pp.state.projects_path):
            os.mkdir(self.pp.state.projects_path)

        if name in os.listdir(f"{self.pp.state.projects_path}"):
            #: check if project already exists
            message = "directory already exists"
            container = ptg.Container(
                message, 
                "",
                ptg.Button("{self.pp.button_color}return", lambda *_: self.pp.switch_window(self.new_project_form()))
            )

            window = ptg.Window(container, box="DOUBLE")
            window.center()
            self.pp.switch_window(window)
            return

        message = f"directory created in path {os.getcwd()}/{self.pp.state.projects_path}/{name}"
        subdirectories = ["input_logs", "models", "petri_nets", "predictive_logs", "partial_traces", "multiple_predictions_path"]
        #: create the directories
        os.mkdir(f"{os.getcwd()}/{self.pp.state.projects_path}/{name}")
        self.pp.state.current_project = name
        self.pp.state.input_logs_path  = f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/input_logs"
        self.pp.state.models_path  = f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/models"
        self.pp.state.petri_nets_path  = f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/petri_nets"
        self.pp.state.predictive_logs_path  = f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/predictive_logs"
        self.pp.state.partial_traces_path = f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/partial_traces"
        self.pp.state.multiple_predictions_path= f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/multiple_predictions_path"

        for subdirectory in subdirectories: 
            os.mkdir(f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/{subdirectory}")

        container =ptg.Container( 
            ptg.Label(f"{message}"),
            "",
            ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.select_mode())), 
            "",
            ptg.Button(f"{self.pp.button_color}Exit", lambda *_: self.pp.manager.stop())
        )


        window = ptg.Window(container, box="DOUBLE")
        window.center()

        self.pp.switch_window(self.notify_project_creation(message, True))


    def handle_select_mode(self, mode: ProcessProphetMode):
        """
        indicates the previously selected mode the current project will be running in
        selected mode can be confirmed or changed if it was a mis-input -> window either changes to previous menu or next menu to select further actions
        """
        self.pp.mode = mode
        container = [
            f"Currently in {mode.name} mode", 
            "", 
            ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.select_manager())), 
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.select_mode()))
        ]

        window = ptg.Window(*container, box="DOUBLE")
        window.center()
        return window



    def select_mode(self):
        """
        menu to select whether the application should be run in quick or advanced mode. 
        """ 
        container = [
            "Select a mode", 
            "",
            ptg.Button(f"{self.pp.button_color}quick", lambda *_: self.pp.switch_window(self.handle_select_mode(ProcessProphetMode.quick))),
            "",
            ptg.Button(f"{self.pp.button_color}advanced", lambda *_: self.pp.switch_window(self.handle_select_mode(ProcessProphetMode.advanced)))
        ]
        window = ptg.Window(*container, box="DOUBLE")
        window.center()
        return window


    def handle_project_selection(self):
        """
        checks if the selected project exists and updates the `pp.state` with the directories that are needed for the different functionalities of the application
        e.g. "partial_traces" directory in order to make predictions

        The user is notified in the current window if the project is successfully selected and can then pursue further actions like selecting the mode
        of the application

        If the user enters a wrong file name the current window displays the error and the user can go back to the previous menu
        """

        projects = [project  for project in os.listdir(f"{self.pp.state.projects_path}")]
        name= self.input_select_project.value
        if name in projects: 

            self.pp.state.input_logs_path  = f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/input_logs"
            self.pp.state.models_path  = f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/models"
            self.pp.state.petri_nets_path  = f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/petri_nets"
            self.pp.state.predictive_logs_path  = f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/predictive_logs"
            self.pp.state.partial_traces_path = f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/partial_traces"
            self.pp.state.multiple_predictions_path= f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/multiple_predictions_path"
            container =[  
                "Project selected successfully", 
                "",
                ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.select_mode())), "",
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.main_menu()))
            ]
            window = ptg.Window(*container, box="DOUBLE")
            window.center()
            return window

        else: 
            container =  [ 
                "Project does not exist", 
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.load_existing_project()))
            ]
            window = ptg.Window(*container, box="DOUBLE")
            window.center()
            return window


    def load_existing_project(self):
        """
        user can load an existing project by entering the name of the existing project
        if intended the user can return to the main menu or quit the application
        """ 

        projects = [f"{project}"  for project in os.listdir(f"{self.pp.state.projects_path}")]


        self.input_select_project= ptg.InputField(projects[0],  prompt="enter a project name: ")

        left_container = ptg.Container(
            "[underline]Select a project", 
            "",
            self.input_select_project, 
            "",
            ptg.Button(f"{self.pp.button_color}Select", lambda *_: self.pp.switch_window(self.handle_project_selection())), 
            "", 
            ptg.Button(f"{self.pp.button_color}Back", lambda *_: self.pp.switch_window(self.main_menu()))
        )

        right_container= ptg.Container(
            "[underline]Existing projects", 
            *projects 
        )
        window = ptg.Window(ptg.Splitter(left_container,right_container), width = self.pp.window_width)
        #window = ptg.Window(*c, box="DOUBLE")
        window.center()
        return window

    def new_project_form(self):
        """
        user can create a new project and input a name for it
        if intended the user can return to the main menu or quit the application
        """

        self.project_name_input =  ptg.InputField("first Prophet", prompt="Project name: ")
        container =[ 
            ptg.Label(f"Create new project"),
            ptg.Label(f"current path: {os.getcwd()}/{self.pp.state.projects_path}"),
            "", 
            self.project_name_input, 
            "", 
            ptg.Button(f"{self.pp.button_color}Create project", lambda *_: self.handle_project_name_input()), 
            "", 
            ptg.Button(f"{self.pp.button_color}Back to start", lambda *_: self.pp.switch_window(self.main_menu())), 
            "", 
            ptg.Button(f"{self.pp.button_color}Exit", lambda *_: self.pp.manager.stop())
        ] 

        window = ptg.Window(*container, box="DOUBLE")
        window.center()
        return window


    def main_menu(self):
        """
        creates main menu for selecting a project to work on 
        """
        container =[ 
            ptg.Label("Welcome to [yellow]Process Prophet"),
            "", 
            ptg.Label("Choose one option:"),
            "", 
            ptg.Button(f"{self.pp.button_color}Create new project", lambda *_: self.pp.switch_window(self.new_project_form())),
            "", 
            ptg.Button(f"{self.pp.button_color}Load existing project", lambda *_: self.pp.switch_window(self.load_existing_project())),
            "", 
            ptg.Button(f"{self.pp.button_color}Exit", lambda *_: self.pp.manager.stop())
        ] 

        window = ptg.Window(*container, title = "Process Prophet")
        window.center()
        return window

__init__(pp, start=True)

Initialize ProcessProphet Object and main menu.

Parameters:

    pp (ProcessProphet): The ProcessProphet instance in charge of window management. Required.
    start (bool, optional): If set to True, we start at the very beginning, i.e., project selection/creation. Otherwise, we go straight into the manager selection. Defaults to True.
Source code in CLI/ProcessProphetStart.py
def __init__(self, pp, start:bool= True):
    """
    Initialize ProcessProphet Object and main menu.

    Args:
        pp (ProcessProphet): The ProcessProphet instance in charge of window management.
        start (bool, optional): If set to True, we start at the very beginning, i.e., project selection/creation. 
                    Otherwise, we go straight into the manager selection. Defaults to True.
    """
    self.pp = pp
    if start: 
        self.pp.switch_window(self.main_menu())
    else: 
        self.pp.switch_window(self.select_manager())
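
In practice the two entry points look like this (a minimal sketch, assuming `pp` is the ProcessProphet singleton); the `start=False` variant is what the other managers use to return to the action menu:

ProcessProphetStart(pp)               # start=True: begin with project creation/selection
ProcessProphetStart(pp, start=False)  # skip straight to the manager selection menu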

handle_project_name_input()

An error is displayed if a new project is created with a name that is already used for another project in the projects directory. The user can return to the previous menu to create a new project with a different name.

If there is a valid input for the new project (unique name), then all of the necessary subdirectories are created where the files needed for the different functionalities of the application are stored. For example, a subdirectory for the input log on which the RNN can then be trained. The user can then continue and select the mode in which they want to work in the new project.

At the same time, the state is updated (see ProcessProphetState).

We use the following file structure
  • projects/: Contains all projects.
  • projects/dummy_project/: Contains all important subfolders for dummy_project.
  • projects/dummy_project/input_logs: All input logs used for dummy_project should be stored in this folder.
  • projects/dummy_project/models: All models used for dummy_project are generated in this folder.
  • projects/dummy_project/petri_nets: All petri nets used for dummy_project are stored here.
  • projects/dummy_project/predictive_logs: All generated predictive logs used for dummy_project and conformance checking are stored here.
  • projects/dummy_project/partial_traces: All input partial traces given by the user are searched inside this folder.
  • projects/dummy_project/multiple_predictions_path: All predictions created using the multiple predictions function are stored here (for the dummy_project project).
Source code in CLI/ProcessProphetStart.py
def handle_project_name_input(self):
    """
    Exception if a new project is created with a name that is already used for another project in the projects directory.
    The user can return to the previous menu to create a new project with a different name.

    If there is a valid input for the new project (unique name), then all of the necessary subdirectories are created where the 
    files needed for the different functionalities of the application are stored. For example, a subdirectory for the input log on which
    the RNN can then be trained.
    The user can then continue and select the mode in which they want to work in the new project.

    At the same time, the state is updated (see `ProcessProphetState`).

    We use the following file structure: 
        - `projects/`: Contains all projects.
        - `projects/dummy_project/`: Contains all important subfolders for `dummy_project`.
        - `projects/dummy_project/input_logs`: All input logs used for `dummy_project` should be stored in this folder.
        - `projects/dummy_project/models`: All models used for `dummy_project` are generated in this folder.
        - `projects/dummy_project/petri_nets`: All petri nets used for `dummy_project` are stored here.
        - `projects/dummy_project/predictive_logs`: All generated predictive logs used for `dummy_project` and conformance checking are stored here.
        - `projects/dummy_project/partial_traces`: All input partial traces given by the user are searched inside this folder.  
        - `projects/dummy_project/multiple_predictions_path`: All predictions created using the multiple predictions function are stored here (for the `dummy_project` project).
    """
    name = self.project_name_input.value
    message = ""
    if not os.path.isdir(self.pp.state.projects_path):
        os.mkdir(self.pp.state.projects_path)

    if name in os.listdir(f"{self.pp.state.projects_path}"):
        #: check if project already exists
        message = "directory already exists"
        container = ptg.Container(
            message, 
            "",
            ptg.Button("{self.pp.button_color}return", lambda *_: self.pp.switch_window(self.new_project_form()))
        )

        window = ptg.Window(container, box="DOUBLE")
        window.center()
        self.pp.switch_window(window)
        return

    message = f"directory created in path {os.getcwd()}/{self.pp.state.projects_path}/{name}"
    subdirectories = ["input_logs", "models", "petri_nets", "predictive_logs", "partial_traces", "multiple_predictions_path"]
    #: create the directories
    os.mkdir(f"{os.getcwd()}/{self.pp.state.projects_path}/{name}")
    self.pp.state.current_project = name
    self.pp.state.input_logs_path  = f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/input_logs"
    self.pp.state.models_path  = f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/models"
    self.pp.state.petri_nets_path  = f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/petri_nets"
    self.pp.state.predictive_logs_path  = f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/predictive_logs"
    self.pp.state.partial_traces_path = f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/partial_traces"
    self.pp.state.multiple_predictions_path= f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/multiple_predictions_path"

    for subdirectory in subdirectories: 
        os.mkdir(f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/{subdirectory}")

    container =ptg.Container( 
        ptg.Label(f"{message}"),
        "",
        ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.select_mode())), 
        "",
        ptg.Button(f"{self.pp.button_color}Exit", lambda *_: self.pp.manager.stop())
    )


    window = ptg.Window(container, box="DOUBLE")
    window.center()

    self.pp.switch_window(self.notify_project_creation(message, True))
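
Both this handler and handle_project_selection assign the same six per-project paths one by one. A hypothetical helper (not part of the source) that captures the pattern:

import os

# hypothetical helper, not in the source: builds the per-project subfolder paths
def project_paths(projects_path: str, name: str) -> dict:
    base = f"{os.getcwd()}/{projects_path}/{name}"
    subdirectories = ["input_logs", "models", "petri_nets", "predictive_logs",
                      "partial_traces", "multiple_predictions_path"]
    return {sub: f"{base}/{sub}" for sub in subdirectories}

# e.g. project_paths("projects", "dummy_project")["models"]
# -> "<cwd>/projects/dummy_project/models"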

handle_project_selection()

Checks if the selected project exists and updates pp.state with the directories that are needed for the different functionalities of the application, e.g. the "partial_traces" directory in order to make predictions.

The user is notified in the current window if the project is successfully selected and can then pursue further actions, like selecting the mode of the application.

If the user enters a wrong file name, the current window displays the error and the user can go back to the previous menu.

Source code in CLI/ProcessProphetStart.py
def handle_project_selection(self):
    """
    checks if the selected project exists and updates the `pp.state` with the directories that are needed for the different functionalities of the application
    e.g. "partial_traces" directory in order to make predictions

    The user is notified in the current window if the project is successfully selected and can then pursue further actions like selecting the mode
    of the application

    If the user enters a wrong file name the current window displays the error and the user can go back to the previous menu
    """

    projects = [project  for project in os.listdir(f"{self.pp.state.projects_path}")]
    name= self.input_select_project.value
    if name in projects: 

        self.pp.state.input_logs_path  = f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/input_logs"
        self.pp.state.models_path  = f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/models"
        self.pp.state.petri_nets_path  = f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/petri_nets"
        self.pp.state.predictive_logs_path  = f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/predictive_logs"
        self.pp.state.partial_traces_path = f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/partial_traces"
        self.pp.state.multiple_predictions_path= f"{os.getcwd()}/{self.pp.state.projects_path}/{name}/multiple_predictions_path"
        container =[  
            "Project selected successfully", 
            "",
            ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.select_mode())), "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.main_menu()))
        ]
        window = ptg.Window(*container, box="DOUBLE")
        window.center()
        return window

    else: 
        container =  [ 
            "Project does not exist", 
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.load_existing_project()))
        ]
        window = ptg.Window(*container, box="DOUBLE")
        window.center()
        return window

handle_select_mode(mode)

Indicates the previously selected mode the current project will be running in. The selected mode can be confirmed or changed if it was a mis-input; the window then changes either to the previous menu or to the next menu to select further actions.

Source code in CLI/ProcessProphetStart.py
def handle_select_mode(self, mode: ProcessProphetMode):
    """
    indicates the previously selected mode the current project will be running in
    selected mode can be confirmed or changed if it was a mis-input -> window either changes to previous menu or next menu to select further actions
    """
    self.pp.mode = mode
    container = [
        f"Currently in {mode.name} mode", 
        "", 
        ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.select_manager())), 
        "",
        ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.select_mode()))
    ]

    window = ptg.Window(*container, box="DOUBLE")
    window.center()
    return window
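
handle_select_mode receives a member of ProcessProphetMode, which is defined elsewhere in the CLI. Inferred from its usage on this page, it has at least the two members below; a sketch under that assumption, not the actual definition:

from enum import Enum

# assumed shape of ProcessProphetMode, inferred from select_mode(); the real
# definition lives elsewhere in the CLI
class ProcessProphetMode(Enum):
    quick = "quick"
    advanced = "advanced"

# handle_select_mode then reads mode.name, e.g. ProcessProphetMode.quick.name == "quick"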

launch_conformance()

Launches the Conformance checking CLI interface; the constructor performs the window change.

Source code in CLI/ProcessProphetStart.py
def launch_conformance(self):
    """
    launches the Conformance checking CLI interface 
    the constructor calls the window change
    """
    conformance_checker = ProcessProphetModel(self.pp)

launch_predictor()

Launches the Predictor CLI interface; the constructor performs the window change.

Source code in CLI/ProcessProphetStart.py
def launch_predictor(self):
    """
    launches the Predictor CLI interface 
    the constructor calls the window change
    """
    predictor = ProcessProphetPredict(self.pp)

launch_preprocessor()

Launches the Preprocessing CLI interface; the constructor performs the window change.

Source code in CLI/ProcessProphetStart.py
def launch_preprocessor(self):
    """
    launches the Preprocessing CLI interface. 
    the constructor calls the window change
    """
    preprocessor= ProcessProphetPreprocessing(self.pp)

launch_trainer()

Launches the Training CLI interface; the constructor performs the window change.

Source code in CLI/ProcessProphetStart.py
def launch_trainer(self):
    """
    launches the Training CLI interface 
    the constructor calls the window change
    """
    trainer = ProcessProphetTrain(self.pp)

load_existing_project()

The user can load an existing project by entering its name. If desired, the user can return to the main menu or quit the application.

Source code in CLI/ProcessProphetStart.py
def load_existing_project(self):
    """
    user can load an existing project by entering the name of the existing project
    if intended the user can return to the main menu or quit the application
    """ 

    projects = [f"{project}"  for project in os.listdir(f"{self.pp.state.projects_path}")]


    self.input_select_project= ptg.InputField(projects[0],  prompt="enter a project name: ")

    left_container = ptg.Container(
        "[underline]Select a project", 
        "",
        self.input_select_project, 
        "",
        ptg.Button(f"{self.pp.button_color}Select", lambda *_: self.pp.switch_window(self.handle_project_selection())), 
        "", 
        ptg.Button(f"{self.pp.button_color}Back", lambda *_: self.pp.switch_window(self.main_menu()))
    )

    right_container= ptg.Container(
        "[underline]Existing projects", 
        *projects 
    )
    window = ptg.Window(ptg.Splitter(left_container,right_container), width = self.pp.window_width)
    #window = ptg.Window(*c, box="DOUBLE")
    window.center()
    return window

main_menu()

Creates the main menu for selecting a project to work on.

Source code in CLI/ProcessProphetStart.py
def main_menu(self):
    """
    creates main menu for selecting a project to work on 
    """
    container =[ 
        ptg.Label("Welcome to [yellow]Process Prophet"),
        "", 
        ptg.Label("Choose one option:"),
        "", 
        ptg.Button(f"{self.pp.button_color}Create new project", lambda *_: self.pp.switch_window(self.new_project_form())),
        "", 
        ptg.Button(f"{self.pp.button_color}Load existing project", lambda *_: self.pp.switch_window(self.load_existing_project())),
        "", 
        ptg.Button(f"{self.pp.button_color}Exit", lambda *_: self.pp.manager.stop())
    ] 

    window = ptg.Window(*container, title = "Process Prophet")
    window.center()
    return window

new_project_form()

The user can create a new project and enter a name for it. If desired, the user can return to the main menu or quit the application.

Source code in CLI/ProcessProphetStart.py
def new_project_form(self):
    """
    user can create a new project and input a name for it
    if intended the user can return to the main menu or quit the application
    """

    self.project_name_input =  ptg.InputField("first Prophet", prompt="Project name: ")
    container =[ 
        ptg.Label(f"Create new project"),
        ptg.Label(f"current path: {os.getcwd()}/{self.pp.state.projects_path}"),
        "", 
        self.project_name_input, 
        "", 
        ptg.Button(f"{self.pp.button_color}Create project", lambda *_: self.handle_project_name_input()), 
        "", 
        ptg.Button(f"{self.pp.button_color}Back to start", lambda *_: self.pp.switch_window(self.main_menu())), 
        "", 
        ptg.Button(f"{self.pp.button_color}Exit", lambda *_: self.pp.manager.stop())
    ] 

    window = ptg.Window(*container, box="DOUBLE")
    window.center()
    return window

notify_project_creation(message, success)

Function used to indicate that the new project name is valid. As a result, the window is switched to the menu for selecting the mode the current project is going to run in.

Source code in CLI/ProcessProphetStart.py
def notify_project_creation(self, message, success):
    """
    function used to indicate that the new project name is valid
    as a result the window is switched to the menu for selecting the mode the current project is going to run in
    """ 
    if success: 
        container =ptg.Container( 
            ptg.Label(f"{message}"),
            "",
            ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.select_mode())), 
            "",
            ptg.Button(f"{self.pp.button_color}Exit", lambda *_: self.pp.manager.stop())
        )
    else: 
        container = ptg.Container( 
            ptg.Label(f"{message}!"),
            "",
            ptg.Button(f"{self.pp.button_color}back to menu", lambda *_: self.pp.switch_window(self.main_menu()))
        )


    window = ptg.Window(container, box="DOUBLE")
    window.center()
    return window 

select_manager()

After selecting the project and user mode, the user picks one of the managers in ProcessProphet (preprocessing, training, prediction generation and conformance checking).

Source code in CLI/ProcessProphetStart.py
def select_manager(self) : 
    """
    after selecting the project and user mode, the user picks one of the managers in ProcessProphet 
    (preprocessing, training, prediction generation and conformance checking)
    """
    container = [
        ptg.Label(f"select one of the following actions:"),
        "",
        ptg.Button(f"{self.pp.button_color}import and filter log", lambda *_: self.launch_preprocessor()), 
        "",
        ptg.Button(f"{self.pp.button_color}train neural network", lambda *_: self.launch_trainer()), 
        "",
        ptg.Button(f"{self.pp.button_color}make predictions", lambda *_: self.launch_predictor()), 
        "",
        ptg.Button(f"{self.pp.button_color}conformance checking", lambda *_:self.launch_conformance()), 
        "",
        ptg.Button(f"{self.pp.button_color}back to menu", lambda *_: self.pp.switch_window(self.main_menu())), 
    ] 

    window = ptg.Window(*container, box="DOUBLE")
    window.center()
    return window

select_mode()

Menu to select whether the application should be run in quick or advanced mode.

Source code in CLI/ProcessProphetStart.py
def select_mode(self):
    """
    menu to select whether the application should be run in quick or advanced mode. 
    """ 
    container = [
        "Select a mode", 
        "",
        ptg.Button(f"{self.pp.button_color}quick", lambda *_: self.pp.switch_window(self.handle_select_mode(ProcessProphetMode.quick))),
        "",
        ptg.Button(f"{self.pp.button_color}advanced", lambda *_: self.pp.switch_window(self.handle_select_mode(ProcessProphetMode.advanced)))
    ]
    window = ptg.Window(*container, box="DOUBLE")
    window.center()
    return window

This module gives access to some preprocessing functions.
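
The preprocessing handlers below POST their parameters to the Process Prophet backend and read back either a save_path or an error. A minimal sketch of the same request outside the TUI, assuming SERVER_NAME, SERVER_PORT and TIMEOUT are the constants the CLI imports, and using the default values shown in the forms below:

import requests

params = {
    "path_to_log": "projects/dummy_project/input_logs/Hospital_log.xes",
    "case_id": "case:concept:name",
    "activity_key": "concept:name",
    "timestamp_key": "time:timestamp",
    "is_xes": True,
    "save_path": "projects/dummy_project/input_logs/HL_nan_to_mode.csv",
    "sep": ",",
}
response = requests.post(
    f"http://{SERVER_NAME}:{SERVER_PORT}/replace_with_mode",
    json=params,
    timeout=TIMEOUT,
)
if response.status_code == 200:
    print("log saved in path", response.json()["save_path"])
else:
    print("error:", response.json()["error"])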

ProcessProphetPreprocessing

Source code in CLI/ProcessProphetPreprocessing.py
class ProcessProphetPreprocessing: 
    def __init__(self, pp):
        """
        Initializes a ProcessProphetPreprocessing object and sets up the preprocessing main menu.

        Args:
            pp (ProcessProphet): The ProcessProphet instance in charge of window management.
        """
        self.pp = pp  # Reference to the ProcessProphet object
        self.pp.switch_window(self.preprocessing_main_menu())  # Starts with the preprocessing main menu


    #: this decorator is used for type checking
    @staticmethod
    def check_types(func):
        """
        Decorator that checks if the file is an accepted file type (xes/csv) and if the file exists in the project directory.

        Args:
            func: The function that `check_types` decorates.

        Returns:
            The decorated function.

        Side effects:
            If the file type or existence restrictions are not followed, a new window with the corresponding error is indicated.
        """
        def wrapper(self, *args, **kwargs):
            """
            first it checks if the file is an accepted file type
            (xes/csv) and that the file also exists in the directory of
            the project, and then calls the original function
            """
            if self.log_name.value[-3:]!="xes" and self.log_name.value[-3:] != "csv":
                # error if file is in the wrong file type 
                container= ptg.Container( 
                    ptg.Label(f"only xes/csv supported"),
                    ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.preprocessing_main_menu()))
                )
                window = ptg.Window( container, box="DOUBLE")
                window.center()
                return window
            elif self.log_name.value not in os.listdir(self.pp.state.input_logs_path):
                # error if file does not exist in the directory
                container= ptg.Container( 
                    ptg.Label(f"the log does not exist"),
                    ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.preprocessing_main_menu()))
                )
                window = ptg.Window( container, box="DOUBLE")
                window.center()
                return window
            # call original function the decorator was called on
            return func(self,*args, **kwargs)
        return wrapper

    def loading(self, message = ""):
        """
        function to indicate a message in a new window e.g. to 
        show that a process is loading
        """ 
        container = ptg.Container(
            "Loading...", 
            message
        )
        window = ptg.Window(container, box="DOUBLE")
        window.center()
        self.pp.switch_window(window)

    def return_to_menu(self):
        #: returns to the manager selection menu of `ProcessProphetStart`. Therefore, `start` is set to `False`
        pp_start = ProcessProphetStart.ProcessProphetStart(self.pp, start = False)




    @check_types
    def handle_replace_nan_with_mode(self):
        """
        first decorator is used to ensure the file can be preprocessed

        sends a request to the server with all the needed parameters to replace all NaN values in the log
        and in case of a successful computation of the request by the server the path where the preprocessed log
        is stored in will be indicated in a new window

        if the request fails because e.g. it exceeds the timeout of TIMEOUT the error is displayed in a new window and 
        the user can go back to the window where the parameters are displayed
        """ 
        self.loading("preprocessing data...") # loading screen while data is being preprocessed
        input_logs_path= self.pp.state.input_logs_path

        #: checks if extension is xes. otherwise csv assumed
        is_xes = True if self.log_name.value[-3:] == "xes"  else False

        params = {
            "path_to_log": f"{input_logs_path}/{self.log_name.value}" , 
            "case_id": self.case_id_key.value, 
            "activity_key":  self.case_activity_key.value, 
            "timestamp_key":  self.case_timestamp_key.value, 
            "is_xes": is_xes, 
            "save_path": f"{input_logs_path}/{self.save_path.value}" ,
            "sep": ","
        } 

        response = requests.post(
            f"http://{SERVER_NAME}:{SERVER_PORT}/replace_with_mode", 
            json= params,
            timeout =TIMEOUT
        )
        if response.status_code==200:
            # successful; indicate path for preprocessed data
            data = response.json()
            container= ptg.Container( 
                ptg.Label(f"success"),
                f"log saved in path {data['save_path']}",
                "",
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.preprocessing_main_menu())),
            )
        else:
            # an error occurred
            data = response.json()
            container= ptg.Container( 
                ptg.Label(f"error: {data['error']}"),
                "",
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.preprocessing_main_menu())),
            )

        window = ptg.Window( container, box="DOUBLE")
        window.center()
        return window

    @check_types
    def handle_remove_duplicates(self):
        """
        first decorator is used to ensure the file can be preprocessed

        sends a request to the server with all the needed parameters to remove duplicate rows from the log
        and in case of a successful computation of the request by the server the path where the preprocessed log
        is stored in will be indicated in a new window

        if the request fails because e.g. it exceeds the timeout of TIMEOUT the error is displayed in a new window and 
        the user can go back to the window where the parameters are displayed
        """
        self.loading("preprocessing data...") # loading screen while data is being preprocessed
        input_logs_path= self.pp.state.input_logs_path

        is_xes = True if self.log_name.value[-3:] == "xes"  else False

        params = {
            "path_to_log": f"{input_logs_path}/{self.log_name.value}" , 
            "case_id": self.case_id_key.value, 
            "activity_key":  self.case_activity_key.value, 
            "timestamp_key":  self.case_timestamp_key.value, 
            "is_xes": is_xes, 
            "save_path": f"{input_logs_path}/{self.save_path.value}" ,
            "sep": ","
        } 

        response = requests.post(
            f"http://{SERVER_NAME}:{SERVER_PORT}/remove_duplicates", 
            json= params,
            timeout =TIMEOUT
        )
        if response.status_code==200:
            # successful and indicate path for preprocessed data
            data = response.json()
            container= ptg.Container( 
                ptg.Label(f"success"),
                f"log saved in path {data['save_path']}"
                "",
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.preprocessing_main_menu())),
            )
        else:
            # error ocurred 
            data = response.json()
            container= ptg.Container( 
                ptg.Label(f"error: {data['error']}"),
                "",
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.preprocessing_main_menu())),
            )

        window = ptg.Window( container, box="DOUBLE")
        window.center()
        return window





    @check_types
    def handle_add_unique_start_end(self):
        """
        first decorator is used to ensure the file can be preprocessed

        sends a request to the server with all the needed parameters to add unique start and end activities to each trace
        and in case of a successful computation of the request by the server the path where the preprocessed log
        is stored in will be indicated in a new window

        if the request fails because e.g. it exceeds the timeout of TIMEOUT the error is displayed in a new window and 
        the user can go back to the window where the parameters are displayed
        """
        self.loading("preprocessing data...") # loading screen while data is being preprocessed
        input_logs_path= self.pp.state.input_logs_path
        is_xes = True if self.log_name.value[-3:] == "xes"  else False

        params = {
            "path_to_log": f"{input_logs_path}/{self.log_name.value}" , 
            "case_id": self.case_id_key.value, 
            "activity_key":  self.case_activity_key.value, 
            "timestamp_key":  self.case_timestamp_key.value, 
            "is_xes": is_xes, 
            "save_path": f"{input_logs_path}/{self.save_path.value}" ,
            "sep": ","
        } 

        response = requests.post(
            f"http://{SERVER_NAME}:{SERVER_PORT}/add_unique_start_end", 
            json= params,
            timeout =TIMEOUT
        )
        if response.status_code==200:
            # successful request
            data = response.json()
            container= ptg.Container( 
                ptg.Label(f"success"),
                f"log saved in path {data['save_path']}"
                "",
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.preprocessing_main_menu())),
            )
        else: 
            # an error occured
            data = response.json()
            container= ptg.Container( 
                ptg.Label(f"error: {data['error']}"),
                "",
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.preprocessing_main_menu())),
            )

        window = ptg.Window( container, box="DOUBLE")
        window.center()
        return window


    def add_unique_start_end(self):
        """
        Indicates all the parameters needed to add unique start and end activities to each trace.

        The user can modify these parameters in the left side of the window.

        The function also displays the first few Log file names in the current project on the right side of the window.

        Side effects:
            - Initializes a window with default parameters where the user can adjust them.
            - Initializes a window where all the event logs of the current project are listed for preprocessing.
            - Calls the `add_unique_start_end` function if the user confirms the indicated parameters.
        """ 
        self.log_name=  ptg.InputField("Hospital_log.xes", prompt="log name: ")
        self.case_id_key=  ptg.InputField("case:concept:name", prompt="case id key: ")
        self.case_activity_key=  ptg.InputField("concept:name", prompt="activity key: ")
        self.case_timestamp_key=  ptg.InputField("time:timestamp", prompt="timestamp key: ")
        self.save_path=  ptg.InputField("HL_unique_start.csv", prompt="output log name:") # Name of the preprocessed copy of the log

        left_container = ptg.Container( 
            ptg.Label(f"enter relevant information"),
            self.log_name,
            self.case_id_key, 
            self.case_activity_key, 
            self.case_timestamp_key,
            self.save_path,
            "",
            ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.handle_add_unique_start_end())),
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.preprocessing_main_menu()))
        )

        logs = [log for log in os.listdir(self.pp.state.input_logs_path)]
        logs = logs[:min(len(logs),4 )] #: to not overflow the terminal

        right_container = ptg.Container(
            f"[underline]First {len(logs)} logs in project:", *logs
        ).center()

        window = ptg.Window(ptg.Splitter(left_container, right_container), width = self.pp.window_width)
        #window = ptg.Window(*container)
        window.center()
        return window




    def remove_duplicates(self):
        """
        This function indicates all the parameters that are needed to remove duplicate rows
        and the user can modify them in the left side of the window.

        The function also indicates the first few Log file names in the current project on
        the right side of the window.

        Side effects:
            - Initializes a window with default parameters where the user can adjust them.
            - Initializes a window where all the event logs of the current project are listed that can be used
            for the preprocessing.
            - Calls the `remove_duplicates` function if the user confirms the indicated parameters.
        """ 
        self.log_name=  ptg.InputField("Hospital_log.xes", prompt="log name: ")
        self.case_id_key=  ptg.InputField("case:concept:name", prompt="case id key: ")
        self.case_activity_key=  ptg.InputField("concept:name", prompt="activity key: ")
        self.case_timestamp_key=  ptg.InputField("time:timestamp", prompt="timestamp key: ")
        self.save_path=  ptg.InputField("HL_no_dup.csv", prompt="output log name:") # Name of the preprocessed copy of the log

        left_container = ptg.Container( 
            ptg.Label(f"enter relevant information"),
            self.log_name,
            self.case_id_key, 
            self.case_activity_key, 
            self.case_timestamp_key,
            self.save_path,
            "",
            ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.handle_remove_duplicates())),
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.preprocessing_main_menu()))
        )

        logs = [log for log in os.listdir(self.pp.state.input_logs_path)]
        logs = logs[:min(len(logs),4 )] #: to not overflow the terminal

        right_container = ptg.Container(
            f"[underline]First {len(logs)} logs in project:", *logs
        ).center()

        window = ptg.Window(ptg.Splitter(left_container, right_container), width = self.pp.window_width)
        #window = ptg.Window(*container)
        window.center()
        return window



    def replace_nan_with_mode(self):
        """
        This function indicates all the parameters that are needed to replace NaN values
        and the user can modify them in the left side of the window.

        The function also indicates the first few Log file names in the current project on
        the right side of the window.

        Side effects:
            - Initializes a window with default parameters where the user can adjust them.
            - Initializes a window where all the event logs of the current project are listed that can be used
            for the preprocessing.
            - Calls the `replace_nan_with_mode` function if the user confirms the indicated parameters.
        """ 
        self.log_name=  ptg.InputField("Hospital_log.xes", prompt="log name: ")
        self.case_id_key=  ptg.InputField("case:concept:name", prompt="case id key: ")
        self.case_activity_key=  ptg.InputField("concept:name", prompt="activity key: ")
        self.case_timestamp_key=  ptg.InputField("time:timestamp", prompt="timestamp key: ")
        self.save_path=  ptg.InputField("HL_nan_to_mode.csv", prompt="output log name:") # Name of the preprocessed copy of the log
        #indicates params
        left_container = ptg.Container( 
            ptg.Label(f"enter relevant information"),
            self.log_name,
            self.case_id_key, 
            self.case_activity_key, 
            self.case_timestamp_key,
            self.save_path,
            "",
            ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.handle_replace_nan_with_mode())),
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.preprocessing_main_menu()))
        )

        logs = [log for log in os.listdir(self.pp.state.input_logs_path)]
        logs = logs[:min(len(logs),4 )] #: to not overflow the terminal
        #shows logs in the current project
        right_container = ptg.Container(
            f"[underline]First {len(logs)} logs in project:", *logs
        ).center()

        window = ptg.Window(ptg.Splitter(left_container, right_container), width = self.pp.window_width)
        window.center()
        return window




    def preprocessing_main_menu(self):
        """
        Displays the main menu for the preprocessing manager.

        The user can choose one of the three alternatives:
            - Replacing NaN values in the log.
            - Removing duplicate rows in the log.
            - Adding unique start and end activities to each trace.

        It is also possible to return to the previous menu.
        """
        replace = f"{self.pp.button_color}replace NaN in activity column with mode"
        remove= f"{self.pp.button_color}remove duplicate rows"
        add= f"{self.pp.button_color}add unique start and end activities"

        container = ptg.Container(
            "select one action:", 
            ptg.Button(label = replace,onclick= lambda *_: self.pp.switch_window(self.replace_nan_with_mode())),
            "",
            ptg.Button(label = remove,onclick= lambda *_: self.pp.switch_window(self.remove_duplicates())),
            "",
            ptg.Button(label = add, onclick=lambda *_: self.pp.switch_window(self.add_unique_start_end())), 
            "",
            ptg.Button("back", lambda *_: self.return_to_menu())  


        )

        window = ptg.Window(container, box="DOUBLE", width= self.pp.window_width)
        window.center()
        return window

__init__(pp)

Initializes a ProcessProphetPreprocessing object and sets up the preprocessing main menu.

Parameters:

    pp (ProcessProphet): The ProcessProphet instance in charge of window management. Required.
Source code in CLI/ProcessProphetPreprocessing.py, lines 20-28
def __init__(self, pp):
    """
    Initializes a ProcessProphetPreprocessing object and sets up the preprocessing main menu.

    Args:
        pp (ProcessProphet): The ProcessProphet instance in charge of window management.
    """
    self.pp = pp  # Reference to the ProcessProphet object
    self.pp.switch_window(self.preprocessing_main_menu())  # Starts with the preprocessing main menu
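
A minimal usage sketch (assumed: the import paths mirror the "Source code in CLI/..." file names, and the pytermgui window manager is started with its run() loop); it shows how the preprocessing manager plugs into window management:

# hedged sketch, not part of the project source; import paths are assumed
from CLI.ProcessProphet import ProcessProphet
from CLI.ProcessProphetPreprocessing import ProcessProphetPreprocessing

pp = ProcessProphet()               # singleton in charge of window management
ProcessProphetPreprocessing(pp)     # __init__ immediately switches to the preprocessing main menu
pp.manager.run()                    # assumed: start the pytermgui event loop on the manager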

add_unique_start_end()

Indicates all the parameters needed to add unique start and end activities to each trace.

The user can modify these parameters in the left side of the window.

The function also displays the first few Log file names in the current project on the right side of the window.

Side effects
  • Initializes a window with default parameters where the user can adjust them.
  • Initializes a window where all the event logs of the current project are listed for preprocessing.
  • Calls the add_unique_start_end function if the user confirms the indicated parameters.
Source code in CLI/ProcessProphetPreprocessing.py, lines 261-303
def add_unique_start_end(self):
    """
    Indicates all the parameters needed to add unique start and end activities to each trace.

    The user can modify these parameters in the left side of the window.

    The function also displays the first few Log file names in the current project on the right side of the window.

    Side effects:
        - Initializes a window with default parameters where the user can adjust them.
        - Initializes a window where all the event logs of the current project are listed for preprocessing.
        - Calls the `add_unique_start_end` function if the user confirms the indicated parameters.
    """ 
    self.log_name=  ptg.InputField("Hospital_log.xes", prompt="log name: ")
    self.case_id_key=  ptg.InputField("case:concept:name", prompt="case id key: ")
    self.case_activity_key=  ptg.InputField("concept:name", prompt="activity key: ")
    self.case_timestamp_key=  ptg.InputField("time:timestamp", prompt="timestamp key: ")
    self.save_path=  ptg.InputField("HL_unique_start.csv", prompt="output log name:") # Name of the preprocessed copy of the log

    left_container = ptg.Container( 
        ptg.Label(f"enter relevant information"),
        self.log_name,
        self.case_id_key, 
        self.case_activity_key, 
        self.case_timestamp_key,
        self.save_path,
        "",
        ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.handle_add_unique_start_end())),
        "",
        ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.preprocessing_main_menu()))
    )

    logs = [log for log in os.listdir(self.pp.state.input_logs_path)]
    logs = logs[:min(len(logs),4 )] #: to not overflow the terminal

    right_container = ptg.Container(
        f"[underline]First {len(logs)} logs in project:", *logs
    ).center()

    window = ptg.Window(ptg.Splitter(left_container, right_container), width = self.pp.window_width)
    #window = ptg.Window(*container)
    window.center()
    return window

check_types(func) staticmethod

Decorator that checks if the file is an accepted file type (xes/csv) and if the file exists in the project directory.

Parameters:

    func: The function that check_types decorates. Required.

Returns:

    The decorated function.

Side effects

If the file type or existence restrictions are not followed, a new window with the corresponding error is indicated.

Source code in CLI/ProcessProphetPreprocessing.py, lines 32-72
@staticmethod
def check_types(func):
    """
    Decorator that checks if the file is an accepted file type (xes/csv) and if the file exists in the project directory.

    Args:
        func: The function that `check_types` decorates.

    Returns:
        The decorated function.

    Side effects:
        If the file type or existence restrictions are not followed, a new window with the corresponding error is indicated.
    """
    def wrapper(self, *args, **kwargs):
        """
        first checks that the file has an accepted file type
        (xes/csv) and that it exists in the project directory,
        then calls the original function
        """
        if self.log_name.value[-3:]!="xes" and self.log_name.value[-3:] != "csv":
            # error if file is in the wrong file type 
            container= ptg.Container( 
                ptg.Label(f"only xes/csv supported"),
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.preprocessing_main_menu()))
            )
            window = ptg.Window( container, box="DOUBLE")
            window.center()
            return window
        elif self.log_name.value not in os.listdir(self.pp.state.input_logs_path):
            # error if file does not exist in the directory
            container= ptg.Container( 
                ptg.Label(f"the log does not exist"),
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.preprocessing_main_menu()))
            )
            window = ptg.Window( container, box="DOUBLE")
            window.center()
            return window
        # call original function the decorator was called on
        return func(self,*args, **kwargs)
    return wrapper
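
The same validation rule can be written out on its own; a minimal sketch under an assumed helper name (the decorator applies this check to self.log_name and the project's input_logs_path before running the decorated handler):

# hedged sketch of the check that check_types performs
import os

def is_valid_log(file_name: str, input_logs_path: str) -> bool:
    supported = file_name.endswith(("xes", "csv"))        # only xes/csv logs are accepted
    exists = file_name in os.listdir(input_logs_path)     # the log must be in the project's input logs folder
    return supported and exists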

handle_add_unique_start_end()

The check_types decorator first ensures that the file can be preprocessed.

Sends a request to the server with all the parameters needed to add unique start and end activities to each trace. If the server processes the request successfully, the path where the preprocessed log is stored is shown in a new window.

If the request fails, e.g. because it exceeds the configured timeout (TIMEOUT), the error is displayed in a new window and the user can go back to the window where the parameters are displayed.

Source code in CLI/ProcessProphetPreprocessing.py, lines 207-258
@check_types
def handle_add_unique_start_end(self):
    """
    the check_types decorator first ensures that the file can be preprocessed

    sends a request to the server with all the parameters needed to add unique start and end activities to each trace;
    if the server processes the request successfully, the path where the preprocessed log is stored
    is shown in a new window

    if the request fails, e.g. because it exceeds the configured timeout (TIMEOUT), the error is displayed in a new window and
    the user can go back to the window where the parameters are displayed
    """
    self.loading("preprocessing data...") # loading screen while data is being preprocessed
    input_logs_path= self.pp.state.input_logs_path
    is_xes = True if self.log_name.value[-3:] == "xes"  else False

    params = {
        "path_to_log": f"{input_logs_path}/{self.log_name.value}" , 
        "case_id": self.case_id_key.value, 
        "activity_key":  self.case_activity_key.value, 
        "timestamp_key":  self.case_timestamp_key.value, 
        "is_xes": is_xes, 
        "save_path": f"{input_logs_path}/{self.save_path.value}" ,
        "sep": ","
    } 

    response = requests.post(
        f"http://{SERVER_NAME}:{SERVER_PORT}/add_unique_start_end", 
        json= params,
        timeout =TIMEOUT
    )
    if response.status_code==200:
        # successful request
        data = response.json()
        container= ptg.Container( 
            ptg.Label(f"success"),
            f"log saved in path {data['save_path']}",
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.preprocessing_main_menu())),
        )
    else: 
        # an error occurred
        data = response.json()
        container= ptg.Container( 
            ptg.Label(f"error: {data['error']}"),
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.preprocessing_main_menu())),
        )

    window = ptg.Window( container, box="DOUBLE")
    window.center()
    return window
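
The same request can be reproduced outside the CLI; a minimal sketch with placeholder host, port, paths and timeout (SERVER_NAME, SERVER_PORT and TIMEOUT come from the CLI configuration and are not shown in this listing):

# hedged sketch: host, port and file paths are placeholders
import requests

params = {
    "path_to_log": "projects/my_project/input_logs/Hospital_log.xes",
    "case_id": "case:concept:name",
    "activity_key": "concept:name",
    "timestamp_key": "time:timestamp",
    "is_xes": True,
    "save_path": "projects/my_project/input_logs/HL_unique_start.csv",
    "sep": ",",
}
response = requests.post("http://localhost:8080/add_unique_start_end", json=params, timeout=60)
if response.status_code == 200:
    print("log saved in path", response.json()["save_path"])
else:
    print("error:", response.json()["error"])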

handle_remove_duplicates()

The check_types decorator first ensures that the file can be preprocessed.

Sends a request to the server with all the parameters needed to remove duplicate rows from the log. If the server processes the request successfully, the path where the preprocessed log is stored is shown in a new window.

If the request fails, e.g. because it exceeds the configured timeout (TIMEOUT), the error is displayed in a new window and the user can go back to the window where the parameters are displayed.

Source code in CLI/ProcessProphetPreprocessing.py, lines 149-201
@check_types
def handle_remove_duplicates(self):
    """
    the check_types decorator first ensures that the file can be preprocessed

    sends a request to the server with all the parameters needed to remove duplicate rows from the log;
    if the server processes the request successfully, the path where the preprocessed log is stored
    is shown in a new window

    if the request fails, e.g. because it exceeds the configured timeout (TIMEOUT), the error is displayed in a new window and
    the user can go back to the window where the parameters are displayed
    """
    self.loading("preprocessing data...") # loading screen while data is being preprocessed
    input_logs_path= self.pp.state.input_logs_path

    is_xes = True if self.log_name.value[-3:] == "xes"  else False

    params = {
        "path_to_log": f"{input_logs_path}/{self.log_name.value}" , 
        "case_id": self.case_id_key.value, 
        "activity_key":  self.case_activity_key.value, 
        "timestamp_key":  self.case_timestamp_key.value, 
        "is_xes": is_xes, 
        "save_path": f"{input_logs_path}/{self.save_path.value}" ,
        "sep": ","
    } 

    response = requests.post(
        f"http://{SERVER_NAME}:{SERVER_PORT}/remove_duplicates", 
        json= params,
        timeout =TIMEOUT
    )
    if response.status_code==200:
        # successful and indicate path for preprocessed data
        data = response.json()
        container= ptg.Container( 
            ptg.Label(f"success"),
            f"log saved in path {data['save_path']}",
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.preprocessing_main_menu())),
        )
    else:
        # error occurred
        data = response.json()
        container= ptg.Container( 
            ptg.Label(f"error: {data['error']}"),
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.preprocessing_main_menu())),
        )

    window = ptg.Window( container, box="DOUBLE")
    window.center()
    return window

handle_replace_nan_with_mode()

The check_types decorator first ensures that the file can be preprocessed.

Sends a request to the server with all the parameters needed to replace all NaN values in the log. If the server processes the request successfully, the path where the preprocessed log is stored is shown in a new window.

If the request fails, e.g. because it exceeds the configured timeout (TIMEOUT), the error is displayed in a new window and the user can go back to the window where the parameters are displayed.

Source code in CLI/ProcessProphetPreprocessing.py, lines 94-147
@check_types
def handle_replace_nan_with_mode(self):
    """
    the check_types decorator first ensures that the file can be preprocessed

    sends a request to the server with all the parameters needed to replace all NaN values in the log;
    if the server processes the request successfully, the path where the preprocessed log is stored
    is shown in a new window

    if the request fails, e.g. because it exceeds the configured timeout (TIMEOUT), the error is displayed in a new window and
    the user can go back to the window where the parameters are displayed
    """ 
    self.loading("preprocessing data...") # loading screen while data is being preprocessed
    input_logs_path= self.pp.state.input_logs_path

    #: checks if extension is xes. otherwise csv assumed
    is_xes = True if self.log_name.value[-3:] == "xes"  else False

    params = {
        "path_to_log": f"{input_logs_path}/{self.log_name.value}" , 
        "case_id": self.case_id_key.value, 
        "activity_key":  self.case_activity_key.value, 
        "timestamp_key":  self.case_timestamp_key.value, 
        "is_xes": is_xes, 
        "save_path": f"{input_logs_path}/{self.save_path.value}" ,
        "sep": ","
    } 

    response = requests.post(
        f"http://{SERVER_NAME}:{SERVER_PORT}/replace_with_mode", 
        json= params,
        timeout =TIMEOUT
    )
    if response.status_code==200:
        # successful; indicate path of the preprocessed data
        data = response.json()
        container= ptg.Container( 
            ptg.Label(f"success"),
            f"log saved in path {data['save_path']}",
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.preprocessing_main_menu())),
        )
    else:
        # error occurred
        data = response.json()
        container= ptg.Container( 
            ptg.Label(f"error: {data['error']}"),
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.preprocessing_main_menu())),
        )

    window = ptg.Window( container, box="DOUBLE")
    window.center()
    return window

loading(message='')

function to indicate a message in a new window e.g. to show that a process is loading

Source code in CLI/ProcessProphetPreprocessing.py, lines 74-85
def loading(self, message = ""):
    """
    function to indicate a message in a new window e.g. to 
    show that a process is loading
    """ 
    container = ptg.Container(
        "Loading...", 
        message
    )
    window = ptg.Window(container, box="DOUBLE")
    window.center()
    self.pp.switch_window(window)

preprocessing_main_menu()

Displays the main menu for the preprocessing manager.

The user can choose one of the three alternatives
  • Replacing NaN values in the log.
  • Removing duplicate rows in the log.
  • Adding unique start and end activities to each trace.

It is also possible to return to the previous menu.

Source code in CLI/ProcessProphetPreprocessing.py, lines 402-432
def preprocessing_main_menu(self):
    """
    Displays the main menu for the preprocessing manager.

    The user can choose one of the three alternatives:
        - Replacing NaN values in the log.
        - Removing duplicate rows in the log.
        - Adding unique start and end activities to each trace.

    It is also possible to return to the previous menu.
    """
    replace = f"{self.pp.button_color}replace NaN in activity column with mode"
    remove= f"{self.pp.button_color}remove duplicate rows"
    add= f"{self.pp.button_color}add unique start and end activities"

    container = ptg.Container(
        "select one action:", 
        ptg.Button(label = replace,onclick= lambda *_: self.pp.switch_window(self.replace_nan_with_mode())),
        "",
        ptg.Button(label = remove,onclick= lambda *_: self.pp.switch_window(self.remove_duplicates())),
        "",
        ptg.Button(label = add, onclick=lambda *_: self.pp.switch_window(self.add_unique_start_end())), 
        "",
        ptg.Button("back", lambda *_: self.return_to_menu())  


    )

    window = ptg.Window(container, box="DOUBLE", width= self.pp.window_width)
    window.center()
    return window
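
The menu above shows the pattern used throughout the CLI: every button callback builds the next window and hands it to the ProcessProphet instance via switch_window. A standalone sketch of that pattern, with assumed names pp and preprocessing:

# hedged sketch of the button -> switch_window pattern
import pytermgui as ptg

def build_menu(pp, preprocessing):
    container = ptg.Container(
        "select one action:",
        ptg.Button(
            "remove duplicate rows",
            lambda *_: pp.switch_window(preprocessing.remove_duplicates()),
        ),
    )
    window = ptg.Window(container, box="DOUBLE")
    window.center()
    return window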

remove_duplicates()

This function indicates all the parameters that are needed to remove duplicate rows and the user can modify them in the left side of the window.

The function also indicates the first few Log file names in the current project on the right side of the window.

Side effects
  • Initializes a window with default parameters where the user can adjust them.
  • Initializes a window where all the event logs of the current project are listed that can be used for the preprocessing.
  • Calls the remove_duplicates function if the user confirms the indicated parameters.
Source code in CLI/ProcessProphetPreprocessing.py, lines 308-351
def remove_duplicates(self):
    """
    This function indicates all the parameters that are needed to remove duplicate rows
    and the user can modify them in the left side of the window.

    The function also indicates the first few Log file names in the current project on
    the right side of the window.

    Side effects:
        - Initializes a window with default parameters where the user can adjust them.
        - Initializes a window where all the event logs of the current project are listed that can be used
        for the preprocessing.
        - Calls the `remove_duplicates` function if the user confirms the indicated parameters.
    """ 
    self.log_name=  ptg.InputField("Hospital_log.xes", prompt="log name: ")
    self.case_id_key=  ptg.InputField("case:concept:name", prompt="case id key: ")
    self.case_activity_key=  ptg.InputField("concept:name", prompt="activity key: ")
    self.case_timestamp_key=  ptg.InputField("time:timestamp", prompt="timestamp key: ")
    self.save_path=  ptg.InputField("HL_no_dup.csv", prompt="output log name:") # Name of the preprocessed copy of the log

    left_container = ptg.Container( 
        ptg.Label(f"enter relevant information"),
        self.log_name,
        self.case_id_key, 
        self.case_activity_key, 
        self.case_timestamp_key,
        self.save_path,
        "",
        ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.handle_remove_duplicates())),
        "",
        ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.preprocessing_main_menu()))
    )

    logs = [log for log in os.listdir(self.pp.state.input_logs_path)]
    logs = logs[:min(len(logs),4 )] #: to not overflow the terminal

    right_container = ptg.Container(
        f"[underline]First {len(logs)} logs in project:", *logs
    ).center()

    window = ptg.Window(ptg.Splitter(left_container, right_container), width = self.pp.window_width)
    #window = ptg.Window(*container)
    window.center()
    return window

replace_nan_with_mode()

This function indicates all the parameters that are needed to replace NaN values and the user can modify them in the left side of the window.

The function also indicates the first few Log file names in the current project on the right side of the window.

Side effects
  • Initializes a window with default parameters where the user can adjust them.
  • Initializes a window where all the event logs of the current project are listed that can be used for the preprocessing.
  • Calls the replace_nan_with_mode function if the user confirms the indicated parameters.
Source code in CLI/ProcessProphetPreprocessing.py, lines 355-397
def replace_nan_with_mode(self):
    """
    This function indicates all the parameters that are needed to replace NaN values
    and the user can modify them in the left side of the window.

    The function also indicates the first few Log file names in the current project on
    the right side of the window.

    Side effects:
        - Initializes a window with default parameters where the user can adjust them.
        - Initializes a window where all the event logs of the current project are listed that can be used
        for the preprocessing.
        - Calls the `replace_nan_with_mode` function if the user confirms the indicated parameters.
    """ 
    self.log_name=  ptg.InputField("Hospital_log.xes", prompt="log name: ")
    self.case_id_key=  ptg.InputField("case:concept:name", prompt="case id key: ")
    self.case_activity_key=  ptg.InputField("concept:name", prompt="activity key: ")
    self.case_timestamp_key=  ptg.InputField("time:timestamp", prompt="timestamp key: ")
    self.save_path=  ptg.InputField("HL_nan_to_mode.csv", prompt="output log name:") # Name of the preprocessed copy of the log
    #indicates params
    left_container = ptg.Container( 
        ptg.Label(f"enter relevant information"),
        self.log_name,
        self.case_id_key, 
        self.case_activity_key, 
        self.case_timestamp_key,
        self.save_path,
        "",
        ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.handle_replace_nan_with_mode())),
        "",
        ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.preprocessing_main_menu()))
    )

    logs = [log for log in os.listdir(self.pp.state.input_logs_path)]
    logs = logs[:min(len(logs),4 )] #: to not overflow the terminal
    #shows logs in the current project
    right_container = ptg.Container(
        f"[underline]First {len(logs)} logs in project:", *logs
    ).center()

    window = ptg.Window(ptg.Splitter(left_container, right_container), width = self.pp.window_width)
    window.center()
    return window

This module supports training of the RMTPP model.

ProcessProphetTrain

This class provides three basic functions:
  • train RNN by setting params manually
  • train RNN using grid search
  • train RNN using random search

Each one of these options generates a .pt file containing the PyTorch model and a .config.json file containing the RNN training configuration, encoders, and other data relevant to Process Prophet.

Source code in CLI/ProcessProphetTrain.py, lines 22-677
class ProcessProphetTrain: 
    """
    This class provides three basic functions:
    - train RNN by setting params manually
    - train RNN using grid search
    - train RNN using random search
    Each one of these options generates a `.pt` file containing the PyTorch model and a 
    `.config.json` file containing the RNN training configuration, encoders, and other data
    relevant to Process Prophet.
    """
    def __init__(self, pp):
        """
        other state parameters that make sense in the context of training might also be saved 
        here
        """
        self.pp = pp #: reference to the PP object 
        #: after creating the object, set the main menu as start screen
        self.pp.switch_window(self.trainer_main_menu())


    def loading(self, message = ""): 
        """
        a loading screen 
        """
        container = ptg.Container(
            "Loading...", 
            message
        )
        window = ptg.Window(*container, box="DOUBLE")
        window.center()
        self.pp.switch_window(window)


    def return_to_menu(self):
        """
        returns to ProcessProphetStart. start is set to False, since we don't want to select the project again.
        this makes sense, for example, when the user wants to make predictions after having trained the RNN.
        """
        pp_start = ProcessProphetStart.ProcessProphetStart(self.pp, start = False)



    def start_training(self) : 
        """
        Carries out a training request.

        Side effects on success:
            - `model`: A model `.pt` file is saved in the models folder.
            - `config`: A model's `config.json` information for the server is saved in the models folder as a JSON file.

        The training statistics (time error, accuracy, recall, f1 score) are displayed on the screen.

        If the training is unsuccessful, the error returned by the server is displayed on the CLI.
        """
        self.loading("preprocessing data...")
        input_logs_path= self.pp.state.input_logs_path

        is_xes = True if self.log_name.value[-3:] == "xes"  else False
        cuda= True if  self.cuda.value== "True"  else False

        params = {
            "path_to_log": f"{input_logs_path}/{self.log_name.value}" , 
            "split": self.split.value, 
            "model_path": f"{self.pp.state.models_path}/{self.model_name.value}", 
            "case_id": self.case_id_key.value, 
            "activity_key":  self.case_activity_key.value, 
            "timestamp_key":  self.case_timestamp_key.value, 
            "cuda": cuda, 
            "seq_len": self.seq_len.value, 
            "emb_dim": self.emb_dim.value, 
            "hid_dim":self.hid_dim.value, 
            "mlp_dim":self.mlp_dim.value, 
            "lr": self.lr.value, 
            "batch_size": self.batch_size.value, 
            "epochs": self.epochs.value, 
            "is_xes": is_xes
        } 

        response = requests.post(
            f"http://{SERVER_NAME}:{SERVER_PORT}/train_nn", 
            json= params,
            timeout =TIMEOUT
        )
        if response.status_code == 200: 
            data = response.json()

            statistics = data["training_statistics"]


            container =[  
                "training successful", 
                f"time error: {statistics['time error']}", 
                f"accuracy: {statistics['acc']}", 
                f"recall: {statistics['recall']}", 
                f"f1-score: {statistics['f1']}", 
                ptg.Button(f"{self.pp.button_color}training menu", lambda *_: self.pp.switch_window(self.trainer_main_menu())), 
                ptg.Button(f"{self.pp.button_color}action menu", lambda *_:  self.return_to_menu())
            ]
        else: 
            data = response.json()
            error = data["error"]
            container = [ 
                "training FAILED:",
                "",
                f"{error}", 
                "",
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.set_training_params()))
            ]
        window = ptg.Window(*container, box="DOUBLE")
        window.center()
        return window





    def set_training_params(self):
        """
        Sets the training parameters for the model.

        This method allows the user to either start the training with the displayed default parameters or adapt the parameters
        according to their own preference.

        Side Effects:
            - The modified parameters are stored in a container and then the training function is called.
            - Parameters are displayed in the window.
            - A second window is displayed to show the logs contained in this project as a visual aid.
        """
        self.cuda = ptg.InputField("True", prompt="use cuda: ")
        self.model_name = ptg.InputField("f.pt", prompt="model name: ")
        self.seq_len = ptg.InputField("10", prompt="sequence length: ")
        self.emb_dim = ptg.InputField("32", prompt="embedding dimension: ")
        self.hid_dim = ptg.InputField("32", prompt="hidden dimension: ")
        self.mlp_dim = ptg.InputField("16", prompt="mlp dimension: ")
        self.epochs = ptg.InputField("10", prompt="number of epochs: ")
        self.batch_size = ptg.InputField("1024", prompt="batch size: ")
        self.lr = ptg.InputField("1e-3", prompt="learning rate: ")
        self.split = ptg.InputField("0.9", prompt="split fraction: ")
        self.log_name = ptg.InputField("Hospital_log.xes", prompt="log name: ")
        self.case_id_key = ptg.InputField("case:concept:name", prompt="case id key: ")
        self.case_activity_key = ptg.InputField("concept:name", prompt="activity key: ")
        self.case_timestamp_key = ptg.InputField("time:timestamp", prompt="timestamp key: ")

        # Contains the form for setting the parameters
        left_container = ptg.Container(
            ptg.Label(f"Set parameters for training"),
            SERVER_NAME,
            self.cuda,
            self.model_name,
            self.seq_len,
            self.emb_dim,
            self.hid_dim,
            self.mlp_dim,
            self.lr,
            self.batch_size,
            self.epochs,
            self.split,
            self.log_name,
            self.case_id_key,
            self.case_activity_key,
            self.case_timestamp_key,
            "",
            ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.start_training())),
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.trainer_main_menu()))
        )

        logs = [log for log in os.listdir(self.pp.state.input_logs_path)]
        logs = logs[:min(len(logs), 4)]  # To not overflow the terminal

        # Contains a list of the logs contained in the input logs path
        right_container = ptg.Container(
            f"[underline]First {len(logs)} logs in project:", *logs
        ).center()

        window = ptg.Window(ptg.Splitter(left_container, right_container), width=self.pp.window_width)
        window.center()
        return window

    def start_grid_search(self):
        """
        sends a request to the server with all the needed parameters to carry out grid search training
        and in case of a successful computation of the request by the server the accuracy of the trained
        model is displayed in a new window. It is then possible to return to the action (manager selection) or training menu. 

        if the request fails because e.g. it exceeds the timeout of TIMEOUT the error is displayed in a new window and 
        the user can go back to the window where the parameters are displayed
        """ 
        self.loading("preprocessing data...")

        input_logs_path= self.pp.state.input_logs_path

        #: checks if the file extension is xes.
        is_xes = True if self.log_name.value[-3:] == "xes"  else False

        #: casting bool("False") also returns True 
        cuda= True if  self.cuda.value== "True"  else False

        #: search params for grid search
        sp = {
            "hid_dim":[self.hid_dim_lower.value,self.hid_dim_upper.value, self.hid_dim_step.value] ,
            "mlp_dim":[self.mlp_dim_lower.value, self.mlp_dim_upper.value, self.mlp_dim_step.value] ,
            "emb_dim":[self.emb_dim_lower.value, self.emb_dim_upper.value, self.emb_dim_step.value] 
        } 


        params = {
            "path_to_log": f"{input_logs_path}/{self.log_name.value}" , 
            "split": self.split.value, 
            "model_path": f"{self.pp.state.models_path}/{self.model_name.value}", 
            "case_id": self.case_id_key.value, 
            "activity_key":  self.case_activity_key.value, 
            "timestamp_key":  self.case_timestamp_key.value, 
            "cuda": cuda, 
            "seq_len": self.seq_len.value, 
            "lr": self.lr.value, 
            "batch_size": self.batch_size.value, 
            "epochs": self.epochs.value, 
            "is_xes": is_xes,
            "search_params": sp
        } 

        response = requests.post(
            f"http://{SERVER_NAME}:{SERVER_PORT}/grid_search", 
            json= params,
            timeout =TIMEOUT
        )
        if response.status_code == 200: 
            data = response.json()

            accuracy = data["acc"]
            #: display accuracy on success
            container =[
                "training successful", 
                f"accuracy: {accuracy}", 
                ptg.Button(f"{self.pp.button_color}training menu", lambda *_: self.pp.switch_window(self.trainer_main_menu())), 
                ptg.Button(f"{self.pp.button_color}action menu", lambda *_:  self.return_to_menu())
            ]
        else: 
            #: display error on fail
            data =response.json()
            error = data["error"]
            container =[  
                "training FAILED:", 
                "",
                f"{error}", 
                "",
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.set_grid_search_params()))
            ]
        window = ptg.Window(*container, box="DOUBLE")
        window.center()
        return window


    def start_random_search(self) :
        """
        sends a request to the server with all the needed parameters to do a random search training
        and in case of a successful computation of the request by the server the accuracy of the trained
        model is displayed in a new window. It is then possible to return to the action or training menu

        if the request fails because e.g. it exceeds the timeout of TIMEOUT the error is displayed in a new window and 
        the user can go back to the window where the parameters are displayed
        """ 
        self.loading("preprocessing data...") #: shows the loading screen until the response is received.

        input_logs_path= self.pp.state.input_logs_path

        is_xes = True if self.log_name.value[-3:] == "xes"  else False

        cuda= True if  self.cuda.value== "True"  else False

        #: params for random search. here we only need lower and upper bounds
        sp = {
            "hid_dim":[self.hid_dim_lower.value,self.hid_dim_upper.value] ,
            "mlp_dim":[self.mlp_dim_lower.value, self.mlp_dim_upper.value] ,
            "emb_dim":[self.emb_dim_lower.value, self.emb_dim_upper.value] 
        } 

        #: note the iterations param needed for random search.
        params = {
            "path_to_log": f"{input_logs_path}/{self.log_name.value}" , 
            "split": self.split.value, 
            "case_id": self.case_id_key.value, 
            "activity_key":  self.case_activity_key.value, 
            "timestamp_key":  self.case_timestamp_key.value, 
            "cuda": cuda, 
            "model_path": f"{self.pp.state.models_path}/{self.model_name.value}", 
            "seq_len": self.seq_len.value, 
            "lr": self.lr.value, 
            "batch_size": self.batch_size.value, 
            "epochs": self.epochs.value, 
            "is_xes": is_xes,
            "search_params": sp, 
            "iterations": self.iterations.value
        } 

        response = requests.post(
            f"http://{SERVER_NAME}:{SERVER_PORT}/random_search", 
            json= params,
            timeout =TIMEOUT
        )
        if response.status_code == 200: 
            logger_set_params_cli.debug(response.content)
            data = response.json()

            accuracy = data["acc"]

            #: display accuracy on success
            container = [ 
                "training successful", 
                f"accuracy: {accuracy}", 
                ptg.Button(f"{self.pp.button_color}training menu", lambda *_: self.pp.switch_window(self.trainer_main_menu())), 
                ptg.Button(f"{self.pp.button_color}action menu", lambda *_:  self.return_to_menu())
            ]
        else: 
            data = response.json()
            error = data["error"]
            container = [ 
                "training FAILED:", 
                "",
                f"{error}", 
                "",
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.set_random_search_params()))
            ]
        window = ptg.Window(*container, box="DOUBLE")
        window.center()
        return window

    def set_random_search_params(self):
        """
        Used to set the parameters for random search training alternative.

        This function distinguishes between quick mode and advanced mode by giving more options to customize
        the hyperparameters in the advanced mode, whereas in quick mode only the most important parameters
        can be modified by the user.

        Side effects:
            - Initializes window with default parameters where the user can adjust them.
            - Initializes window where all the event logs of the current project are listed that can be used
            for the training.
            - Random search can be called if the user confirms the indicated parameters.
        """
        if self.pp.mode == ProcessProphetMode.advanced:
            #: show all params in case of advanced mode
            self.cuda=  ptg.InputField("True", prompt="use cuda: ")
            self.model_name=  ptg.InputField("f.pt", prompt="model name: ")
            self.seq_len=  ptg.InputField("10", prompt="sequence length: ")
            self.lr=  ptg.InputField("1e-3", prompt="learning rate: ")
            self.batch_size=  ptg.InputField("1024", prompt="batch size: ")
            self.epochs= ptg.InputField("10", prompt="number of epochs: ")
            self.split= ptg.InputField("0.9", prompt="split fraction: ")
            self.log_name= ptg.InputField("Hospital_log.xes", prompt="log name: ")
            self.iterations= ptg.InputField("2", prompt="iterations: ")


            self.case_id_key= ptg.InputField("case:concept:name", prompt="case id key: ")
            self.case_activity_key= ptg.InputField("concept:name", prompt="activity key: ")
            self.case_timestamp_key= ptg.InputField("time:timestamp", prompt="timestamp key: ")

            self.hid_dim_lower= ptg.InputField("100", prompt="hidden dim. lower bound: ")
            self.hid_dim_upper= ptg.InputField("200", prompt="hidden dim. upper bound: ")

            self.mlp_dim_lower= ptg.InputField("100", prompt="mlp dim. lower bound: ")
            self.mlp_dim_upper= ptg.InputField("200", prompt="mlp dim. upper bound: ")


            self.emb_dim_lower= ptg.InputField("100", prompt="emb dim. lower bound: ")
            self.emb_dim_upper= ptg.InputField("200", prompt="emb dim. upper bound: ")

            container = ptg.Container( 
                ptg.Label(f"set parameters for random search"),
                self.cuda , 
                self.model_name ,
                self.seq_len ,
                self.lr ,
                self.batch_size ,
                self.epochs ,
                self.split, 
                self.log_name,
                self.case_id_key, 
                self.case_activity_key, 
                self.case_timestamp_key,
                self.hid_dim_lower, 
                self.hid_dim_upper, 
                self.mlp_dim_lower, 
                self.mlp_dim_upper, 
                self.emb_dim_lower, 
                self.emb_dim_upper, 
                self.iterations,"",
                ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.start_random_search())),"",
                "",
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.trainer_main_menu()))
            )

            logs = [log for log in os.listdir(self.pp.state.input_logs_path)]
            logs = logs[:min(len(logs),4 )] #: to not overflow the terminal

            #: shows the available logs.
            right_container = ptg.Container(
                f"[underline]First {len(logs)} logs in project:", *logs
            ).center()
            window = ptg.Window(ptg.Splitter(container, right_container), width = self.pp.window_width)
            #window = ptg.Window(*container)
            window.center()
            return window
        elif self.pp.mode == ProcessProphetMode.quick:
            #: show some params in case of quick mode
            self.cuda=  ptg.InputField("False", prompt="use cuda: ") #: no cuda assumed

            self.lr=  ptg.InputField("1e-3", prompt="learning rate: ")  #: set to 1e-3 by default, this is a usual value

            self.epochs= ptg.InputField("30", prompt="number of epochs: ") #: set to 30 by default
            self.split= ptg.InputField("0.9", prompt="split fraction: ") #: set to 0.9 by default



            self.batch_size=  ptg.InputField("1024", prompt="batch size: ")
            self.model_name=  ptg.InputField("f.pt", prompt="model name: ")
            self.seq_len=  ptg.InputField("10", prompt="sequence length: ") 
            self.log_name= ptg.InputField("Hospital_log.xes", prompt="log name: ")
            self.iterations= ptg.InputField("2", prompt="iterations: ")


            self.case_id_key= ptg.InputField("case:concept:name", prompt="case id key: ")
            self.case_activity_key= ptg.InputField("concept:name", prompt="activity key: ")
            self.case_timestamp_key= ptg.InputField("time:timestamp", prompt="timestamp key: ")

            self.hid_dim_lower= ptg.InputField("100", prompt="hidden dim. lower bound: ")
            self.hid_dim_upper= ptg.InputField("200", prompt="hidden dim. upper bound: ")

            self.mlp_dim_lower= ptg.InputField("100", prompt="mlp dim. lower bound: ")
            self.mlp_dim_upper= ptg.InputField("200", prompt="mlp dim. upper bound: ")


            self.emb_dim_lower= ptg.InputField("100", prompt="emb dim. lower bound: ")
            self.emb_dim_upper= ptg.InputField("200", prompt="emb dim. upper bound: ")

            container = ptg.Container( 
                ptg.Label(f"set parameters for random search"),
                self.model_name ,
                self.seq_len ,
                self.batch_size ,
                self.log_name,
                self.case_id_key, 
                self.case_activity_key, 
                self.case_timestamp_key,
                self.hid_dim_lower, 
                self.hid_dim_upper, 
                self.mlp_dim_lower, 
                self.mlp_dim_upper, 
                self.emb_dim_lower, 
                self.emb_dim_upper, 
                self.iterations,"",
                ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.start_random_search())),"",
                "",
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.trainer_main_menu()))
            )

            logs = [log for log in os.listdir(self.pp.state.input_logs_path)]
            logs = logs[:min(len(logs),4 )] #: to not overflow the terminal

            right_container = ptg.Container(
                f"[underline]First {len(logs)} logs in project:", *logs
            ).center()
            window = ptg.Window(ptg.Splitter(container, right_container), width = self.pp.window_width)
            #window = ptg.Window(*container)
            window.center()
            return window


    def set_grid_search_params(self):
        """
        Used to set the parameters for grid search training alternative.

        This function distinguishes between quick mode and advanced mode by giving more options to customize
        the hyperparameters in the advanced mode, whereas in quick mode only the most important parameters
        can be modified by the user.

        Side effects:
            - Initializes window with default parameters where the user can adjust them.
            - Initializes window where all the event logs of the current project are listed that can be used
            for the training.
            - Grid search can be called if the user confirms the indicated parameters.
        """
        if self.pp.mode == ProcessProphetMode.advanced:
            #: show all params in case of advanced mode
            self.cuda=  ptg.InputField("True", prompt="use cuda: ")
            self.model_name=  ptg.InputField("f.pt", prompt="model name: ")
            self.seq_len=  ptg.InputField("10", prompt="sequence length: ")
            self.lr=  ptg.InputField("1e-3", prompt="learning rate: ")
            self.batch_size=  ptg.InputField("1024", prompt="batch size: ")
            self.epochs= ptg.InputField("10", prompt="number of epochs: ")
            self.split= ptg.InputField("0.9", prompt="split fraction: ")
            self.log_name= ptg.InputField("Hospital_log.xes", prompt="log name: ")
            self.case_id_key= ptg.InputField("case:concept:name", prompt="case id key: ")
            self.case_activity_key= ptg.InputField("concept:name", prompt="activity key: ")
            self.case_timestamp_key= ptg.InputField("time:timestamp", prompt="timestamp key: ")

            self.hid_dim_lower= ptg.InputField("100", prompt="hidden dim. lower bound: ")
            self.hid_dim_upper= ptg.InputField("200", prompt="hidden dim. upper bound: ")
            self.hid_dim_step= ptg.InputField("50", prompt="hidden dim. step: ")

            self.mlp_dim_lower= ptg.InputField("100", prompt="mlp dim. lower bound: ")
            self.mlp_dim_upper= ptg.InputField("200", prompt="mlp dim. upper bound: ")
            self.mlp_dim_step= ptg.InputField("100", prompt="mlp dim. step: ")


            self.emb_dim_lower= ptg.InputField("100", prompt="emb dim. lower bound: ")
            self.emb_dim_upper= ptg.InputField("200", prompt="emb dim. upper bound: ")
            self.emb_dim_step= ptg.InputField("100", prompt="emb dim. step: ")

            container = ptg.Container( 
                ptg.Label(f"set parameters for grid search"),
                self.cuda , 
                self.model_name ,
                self.seq_len ,
                self.lr ,
                self.batch_size ,
                self.epochs ,
                self.split, 
                self.log_name,
                self.case_id_key, 
                self.case_activity_key, 
                self.case_timestamp_key,
                self.hid_dim_lower, 
                self.hid_dim_upper, 
                self.hid_dim_step, 
                self.mlp_dim_lower, 
                self.mlp_dim_upper, 
                self.mlp_dim_step, 
                self.emb_dim_lower, 
                self.emb_dim_upper, 
                self.emb_dim_step,
                ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.start_grid_search())),
                "",
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.trainer_main_menu()))
            ).center()


            logs = [log for log in os.listdir(self.pp.state.input_logs_path)]
            logs = logs[:min(len(logs),4 )] #: to not overflow the terminal

            right_container = ptg.Container(
                f"[underline]First {len(logs)} logs in project:", *logs
            ).center()
            window = ptg.Window(ptg.Splitter(container, right_container), width = self.pp.window_width)
            #window = ptg.Window(*container)
            window.center()
            return window
        elif self.pp.mode  == ProcessProphetMode.quick: 
            #: show only some params in case of quick mode
            self.cuda=  ptg.InputField("True", prompt="use cuda: ")
            self.model_name=  ptg.InputField("f.pt", prompt="model name: ")
            self.seq_len=  ptg.InputField("30", prompt="sequence length: ")
            self.lr=  ptg.InputField("1e-3", prompt="learning rate: ")
            self.batch_size=  ptg.InputField("1024", prompt="batch size: ")
            self.epochs= ptg.InputField("10", prompt="number of epochs: ")
            self.split= ptg.InputField("0.9", prompt="split fraction: ")
            self.log_name= ptg.InputField("Hospital_log.xes", prompt="log name: ")
            self.case_id_key= ptg.InputField("case:concept:name", prompt="case id key: ")
            self.case_activity_key= ptg.InputField("concept:name", prompt="activity key: ")
            self.case_timestamp_key= ptg.InputField("time:timestamp", prompt="timestamp key: ")

            self.hid_dim_lower= ptg.InputField("100", prompt="hidden dim. lower bound: ")
            self.hid_dim_upper= ptg.InputField("200", prompt="hidden dim. upper bound: ")
            self.hid_dim_step= ptg.InputField("50", prompt="hidden dim. step: ")

            self.mlp_dim_lower= ptg.InputField("100", prompt="mlp dim. lower bound: ")
            self.mlp_dim_upper= ptg.InputField("200", prompt="mlp dim. upper bound: ")
            self.mlp_dim_step= ptg.InputField("100", prompt="mlp dim. step: ")


            self.emb_dim_lower= ptg.InputField("100", prompt="emb dim. lower bound: ")
            self.emb_dim_upper= ptg.InputField("200", prompt="emb dim. upper bound: ")
            self.emb_dim_step= ptg.InputField("100", prompt="emb dim. step: ")

            container = ptg.Container( 
                ptg.Label(f"set parameters for grid search"),
                self.model_name ,
                self.seq_len ,
                self.batch_size ,
                self.log_name,
                self.case_id_key, 
                self.case_activity_key, 
                self.case_timestamp_key,
                self.hid_dim_lower, 
                self.hid_dim_upper, 
                self.hid_dim_step, 
                self.mlp_dim_lower, 
                self.mlp_dim_upper, 
                self.mlp_dim_step, 
                self.emb_dim_lower, 
                self.emb_dim_upper, 
                self.emb_dim_step,
                ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.start_grid_search())),
                "",
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.trainer_main_menu()))
            ).center()


            logs = [log for log in os.listdir(self.pp.state.input_logs_path)]
            logs = logs[:min(len(logs),4 )] #: to not overflow the terminal

            right_container = ptg.Container(
                f"[underline]First {len(logs)} logs in project:", *logs
            ).center()
            window = ptg.Window(ptg.Splitter(container, right_container), width = self.pp.window_width)
            #window = ptg.Window(*container)
            window.center()
            return window


    def trainer_main_menu(self) :
        """
        this function displays the main menu for the trainer manager. 

        depending on the mode the current project is running in, the user can choose a training alternative
        and will be redirected to a new window where the parameters for the chosen alternative are displayed.

        it is also possible to return to the previous menu.
        """ 
        if self.pp.mode == ProcessProphetMode.advanced: 
            #: the set params manually option is only available in the advanced mode
            container = ptg.Container(
                "select one training alternative", 
                "", 
                ptg.Button(f"{self.pp.button_color}set params manually", lambda *_: self.pp.switch_window(self.set_training_params())), 
                "",
                ptg.Button(f"{self.pp.button_color}grid search", lambda *_: self.pp.switch_window(self.set_grid_search_params())), 
                "",
                ptg.Button(f"{self.pp.button_color}random search", lambda *_: self.pp.switch_window(self.set_random_search_params())),
                "",
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.return_to_menu())
            )

            window = ptg.Window(*container, box="DOUBLE")
            window.center()

            return window

        elif self.pp.mode == ProcessProphetMode.quick: 
            #: only grid search and random search are available in this mode.
            container = ptg.Container(
                "select one training alternative", 
                "",
                ptg.Button(f"{self.pp.button_color}grid search", lambda *_: self.pp.switch_window(self.set_grid_search_params())), 
                "",
                ptg.Button(f"{self.pp.button_color}random search", lambda *_: self.pp.switch_window(self.set_random_search_params())),
                "",
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.return_to_menu())
            )

            window = ptg.Window(*container, box="DOUBLE")
            window.center()

            return window

__init__(pp)

stores a reference to the ProcessProphet instance and opens the trainer main menu; other state parameters that make sense in the context of training might also be saved here

Source code in CLI/ProcessProphetTrain.py
def __init__(self, pp):
    """
    other state parameters that make sense in the context of training might also be saved 
    here
    """
    self.pp = pp #: reference to the PP object 
    #: after creating the object, set the main menu as start screen
    self.pp.switch_window(self.trainer_main_menu())

loading(message='')

a loading screen

Source code in CLI/ProcessProphetTrain.py
def loading(self, message = ""): 
    """
    a loading screen 
    """
    container = ptg.Container(
        "Loading...", 
        message
    )
    window = ptg.Window(*container, box="DOUBLE")
    window.center()
    self.pp.switch_window(window)

return_to_menu()

returns to p.p. start. start is set to False, since we don't want to select the project again. This makes sense, for example, when the user wants to make predictions after having trained the RNN.

Source code in CLI/ProcessProphetTrain.py
def return_to_menu(self):
    """
    returns to p.p. start. start is set to False, since we don't want to select the project again. 
    this makes sense for example, when the user wants to make predictions after having trained the RNN.
    """
    pp_start = ProcessProphetStart.ProcessProphetStart(self.pp, start = False)

set_grid_search_params()

Used to set the parameters for the grid search training alternative.

This function distinguishes between quick mode and advanced mode: the advanced mode exposes more options to customize the hyperparameters, whereas in quick mode only the most important parameters can be modified by the user.

Side effects
  • Initializes a window with default parameters that the user can adjust.
  • Initializes a window listing the event logs of the current project that can be used for training.
  • Grid search can be started once the user confirms the indicated parameters; a sketch of how the bound/step triples span the grid follows this list.
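
As a rough illustration of what the [lower, upper, step] triples amount to (the expansion itself happens on the server and is not part of this module), the default bounds could be expanded like this; treating the upper bound as inclusive is an assumption of the sketch, not a documented fact:

# Illustrative sketch only: expand a [lower, upper, step] triple into the
# candidate values a grid search would iterate over.
def expand(lower: str, upper: str, step: str) -> list:
    lo, hi, st = int(lower), int(upper), int(step)
    return list(range(lo, hi + 1, st))

print(expand("100", "200", "50"))   # [100, 150, 200]
print(expand("100", "200", "100"))  # [100, 200]
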
Source code in CLI/ProcessProphetTrain.py
def set_grid_search_params(self):
    """
    Used to set the parameters for grid search training alternative.

    This function distinguishes between quick mode and advanced mode by giving more options to customize
    the hyperparameters in the advanced mode, whereas in quick mode only the most important parameters
    can be modified by the user.

    Side effects:
        - Initializes window with default parameters where the user can adjust them.
        - Initializes window where all the event logs of the current project are listed that can be used
        for the training.
        - Grid search can be called if the user confirms the indicated parameters.
    """
    if self.pp.mode == ProcessProphetMode.advanced:
        #: show all params in case of advanced mode
        self.cuda=  ptg.InputField("True", prompt="use cuda: ")
        self.model_name=  ptg.InputField("f.pt", prompt="model name: ")
        self.seq_len=  ptg.InputField("10", prompt="sequence length: ")
        self.lr=  ptg.InputField("1e-3", prompt="learning rate: ")
        self.batch_size=  ptg.InputField("1024", prompt="batch size: ")
        self.epochs= ptg.InputField("10", prompt="number of epochs: ")
        self.split= ptg.InputField("0.9", prompt="split fraction: ")
        self.log_name= ptg.InputField("Hospital_log.xes", prompt="log name: ")
        self.case_id_key= ptg.InputField("case:concept:name", prompt="case id key: ")
        self.case_activity_key= ptg.InputField("concept:name", prompt="activity key: ")
        self.case_timestamp_key= ptg.InputField("time:timestamp", prompt="timestamp key: ")

        self.hid_dim_lower= ptg.InputField("100", prompt="hidden dim. lower bound: ")
        self.hid_dim_upper= ptg.InputField("200", prompt="hidden dim. upper bound: ")
        self.hid_dim_step= ptg.InputField("50", prompt="hidden dim. step: ")

        self.mlp_dim_lower= ptg.InputField("100", prompt="mlp dim. lower bound: ")
        self.mlp_dim_upper= ptg.InputField("200", prompt="mlp dim. upper bound: ")
        self.mlp_dim_step= ptg.InputField("100", prompt="mlp dim. step: ")


        self.emb_dim_lower= ptg.InputField("100", prompt="emb dim. lower bound: ")
        self.emb_dim_upper= ptg.InputField("200", prompt="emb dim. upper bound: ")
        self.emb_dim_step= ptg.InputField("100", prompt="emb dim. step: ")

        container = ptg.Container( 
            ptg.Label(f"set parameters for grid search"),
            self.cuda , 
            self.model_name ,
            self.seq_len ,
            self.lr ,
            self.batch_size ,
            self.epochs ,
            self.split, 
            self.log_name,
            self.case_id_key, 
            self.case_activity_key, 
            self.case_timestamp_key,
            self.hid_dim_lower, 
            self.hid_dim_upper, 
            self.hid_dim_step, 
            self.mlp_dim_lower, 
            self.mlp_dim_upper, 
            self.mlp_dim_step, 
            self.emb_dim_lower, 
            self.emb_dim_upper, 
            self.emb_dim_step,
            ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.start_grid_search())),
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.trainer_main_menu()))
        ).center()


        logs = [log for log in os.listdir(self.pp.state.input_logs_path)]
        logs = logs[:min(len(logs),4 )] #: to not overflow the terminal

        right_container = ptg.Container(
            f"[underline]First {len(logs)} logs in project:", *logs
        ).center()
        window = ptg.Window(ptg.Splitter(container, right_container), width = self.pp.window_width)
        #window = ptg.Window(*container)
        window.center()
        return window
    elif self.pp.mode  == ProcessProphetMode.quick: 
        #: show a reduced set of params in quick mode
        self.cuda=  ptg.InputField("True", prompt="use cuda: ")
        self.model_name=  ptg.InputField("f.pt", prompt="model name: ")
        self.seq_len=  ptg.InputField("30", prompt="sequence length: ")
        self.lr=  ptg.InputField("1e-3", prompt="learning rate: ")
        self.batch_size=  ptg.InputField("1024", prompt="batch size: ")
        self.epochs= ptg.InputField("10", prompt="number of epochs: ")
        self.split= ptg.InputField("0.9", prompt="split fraction: ")
        self.log_name= ptg.InputField("Hospital_log.xes", prompt="log name: ")
        self.case_id_key= ptg.InputField("case:concept:name", prompt="case id key: ")
        self.case_activity_key= ptg.InputField("concept:name", prompt="activity key: ")
        self.case_timestamp_key= ptg.InputField("time:timestamp", prompt="timestamp key: ")

        self.hid_dim_lower= ptg.InputField("100", prompt="hidden dim. lower bound: ")
        self.hid_dim_upper= ptg.InputField("200", prompt="hidden dim. upper bound: ")
        self.hid_dim_step= ptg.InputField("50", prompt="hidden dim. step: ")

        self.mlp_dim_lower= ptg.InputField("100", prompt="mlp dim. lower bound: ")
        self.mlp_dim_upper= ptg.InputField("200", prompt="mlp dim. upper bound: ")
        self.mlp_dim_step= ptg.InputField("100", prompt="mlp dim. step: ")


        self.emb_dim_lower= ptg.InputField("100", prompt="emb dim. lower bound: ")
        self.emb_dim_upper= ptg.InputField("200", prompt="emb dim. upper bound: ")
        self.emb_dim_step= ptg.InputField("100", prompt="emb dim. step: ")

        container = ptg.Container( 
            ptg.Label(f"set parameters for grid search"),
            self.model_name ,
            self.seq_len ,
            self.batch_size ,
            self.log_name,
            self.case_id_key, 
            self.case_activity_key, 
            self.case_timestamp_key,
            self.hid_dim_lower, 
            self.hid_dim_upper, 
            self.hid_dim_step, 
            self.mlp_dim_lower, 
            self.mlp_dim_upper, 
            self.mlp_dim_step, 
            self.emb_dim_lower, 
            self.emb_dim_upper, 
            self.emb_dim_step,
            ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.start_grid_search())),
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.trainer_main_menu()))
        ).center()


        logs = [log for log in os.listdir(self.pp.state.input_logs_path)]
        logs = logs[:min(len(logs),4 )] #: to not overflow the terminal

        right_container = ptg.Container(
            f"[underline]First {len(logs)} logs in project:", *logs
        ).center()
        window = ptg.Window(ptg.Splitter(container, right_container), width = self.pp.window_width)
        #window = ptg.Window(*container)
        window.center()
        return window

set_random_search_params()

Used to set the parameters for the random search training alternative.

This function distinguishes between quick mode and advanced mode: the advanced mode exposes more options to customize the hyperparameters, whereas in quick mode only the most important parameters can be modified by the user.

Side effects
  • Initializes a window with default parameters that the user can adjust.
  • Initializes a window listing the event logs of the current project that can be used for training.
  • Random search can be started once the user confirms the indicated parameters; a sketch of the sampling over these bounds follows this list.
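
For orientation, the sketch below shows one way such [lower, upper] bounds could be sampled; the uniform sampling per iteration is an assumption made purely for illustration, since the actual sampling strategy lives on the server:

import random

# Illustrative sketch only: random search receives [lower, upper] bounds
# (no step) plus an iteration count; here each hyperparameter is assumed
# to be sampled uniformly from its bounds once per iteration.
bounds = {"hid_dim": (100, 200), "mlp_dim": (100, 200), "emb_dim": (100, 200)}
iterations = 2

for i in range(iterations):
    candidate = {name: random.randint(lo, hi) for name, (lo, hi) in bounds.items()}
    print(f"iteration {i}: {candidate}")
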
Source code in CLI/ProcessProphetTrain.py
def set_random_search_params(self):
    """
    Used to set the parameters for random search training alternative.

    This function distinguishes between quick mode and advanced mode by giving more options to customize
    the hyperparameters in the advanced mode, whereas in quick mode only the most important parameters
    can be modified by the user.

    Side effects:
        - Initializes window with default parameters where the user can adjust them.
        - Initializes window where all the event logs of the current project are listed that can be used
        for the training.
        - Random search can be called if the user confirms the indicated parameters.
    """
    if self.pp.mode == ProcessProphetMode.advanced:
        #: show all params in case of advanced mode
        self.cuda=  ptg.InputField("True", prompt="use cuda: ")
        self.model_name=  ptg.InputField("f.pt", prompt="model name: ")
        self.seq_len=  ptg.InputField("10", prompt="sequence length: ")
        self.lr=  ptg.InputField("1e-3", prompt="learning rate: ")
        self.batch_size=  ptg.InputField("1024", prompt="batch size: ")
        self.epochs= ptg.InputField("10", prompt="number of epochs: ")
        self.split= ptg.InputField("0.9", prompt="split fraction: ")
        self.log_name= ptg.InputField("Hospital_log.xes", prompt="log name: ")
        self.iterations= ptg.InputField("2", prompt="iterations: ")


        self.case_id_key= ptg.InputField("case:concept:name", prompt="case id key: ")
        self.case_activity_key= ptg.InputField("concept:name", prompt="activity key: ")
        self.case_timestamp_key= ptg.InputField("time:timestamp", prompt="timestamp key: ")

        self.hid_dim_lower= ptg.InputField("100", prompt="hidden dim. lower bound: ")
        self.hid_dim_upper= ptg.InputField("200", prompt="hidden dim. upper bound: ")

        self.mlp_dim_lower= ptg.InputField("100", prompt="mlp dim. lower bound: ")
        self.mlp_dim_upper= ptg.InputField("200", prompt="mlp dim. upper bound: ")


        self.emb_dim_lower= ptg.InputField("100", prompt="emb dim. lower bound: ")
        self.emb_dim_upper= ptg.InputField("200", prompt="emb dim. upper bound: ")

        container = ptg.Container( 
            ptg.Label(f"set parameters for random search"),
            self.cuda , 
            self.model_name ,
            self.seq_len ,
            self.lr ,
            self.batch_size ,
            self.epochs ,
            self.split, 
            self.log_name,
            self.case_id_key, 
            self.case_activity_key, 
            self.case_timestamp_key,
            self.hid_dim_lower, 
            self.hid_dim_upper, 
            self.mlp_dim_lower, 
            self.mlp_dim_upper, 
            self.emb_dim_lower, 
            self.emb_dim_upper, 
            self.iterations,"",
            ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.start_random_search())),"",
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.trainer_main_menu()))
        )

        logs = [log for log in os.listdir(self.pp.state.input_logs_path)]
        logs = logs[:min(len(logs),4 )] #: to not overflow the terminal

        #: shows the available logs.
        right_container = ptg.Container(
            f"[underline]First {len(logs)} logs in project:", *logs
        ).center()
        window = ptg.Window(ptg.Splitter(container, right_container), width = self.pp.window_width)
        #window = ptg.Window(*container)
        window.center()
        return window
    elif self.pp.mode == ProcessProphetMode.quick:
        #: show some params in case of quick mode
        self.cuda=  ptg.InputField("False", prompt="use cuda: ") #: no cuda assumed

        self.lr=  ptg.InputField("1e-3", prompt="learning rate: ")  #: set to 1e-3 by default, this is a usual value

        self.epochs= ptg.InputField("30", prompt="number of epochs: ") #: set to 30 by default
        self.split= ptg.InputField("0.9", prompt="split fraction: ") #: set to 0.9 by default



        self.batch_size=  ptg.InputField("1024", prompt="batch size: ")
        self.model_name=  ptg.InputField("f.pt", prompt="model name: ")
        self.seq_len=  ptg.InputField("10", prompt="sequence length: ") 
        self.log_name= ptg.InputField("Hospital_log.xes", prompt="log name: ")
        self.iterations= ptg.InputField("2", prompt="iterations: ")


        self.case_id_key= ptg.InputField("case:concept:name", prompt="case id key: ")
        self.case_activity_key= ptg.InputField("concept:name", prompt="activity key: ")
        self.case_timestamp_key= ptg.InputField("time:timestamp", prompt="timestamp key: ")

        self.hid_dim_lower= ptg.InputField("100", prompt="hidden dim. lower bound: ")
        self.hid_dim_upper= ptg.InputField("200", prompt="hidden dim. upper bound: ")

        self.mlp_dim_lower= ptg.InputField("100", prompt="mlp dim. lower bound: ")
        self.mlp_dim_upper= ptg.InputField("200", prompt="mlp dim. upper bound: ")


        self.emb_dim_lower= ptg.InputField("100", prompt="emb dim. lower bound: ")
        self.emb_dim_upper= ptg.InputField("200", prompt="emb dim. upper bound: ")

        container = ptg.Container( 
            ptg.Label(f"set parameters for random search"),
            self.model_name ,
            self.seq_len ,
            self.batch_size ,
            self.log_name,
            self.case_id_key, 
            self.case_activity_key, 
            self.case_timestamp_key,
            self.hid_dim_lower, 
            self.hid_dim_upper, 
            self.mlp_dim_lower, 
            self.mlp_dim_upper, 
            self.emb_dim_lower, 
            self.emb_dim_upper, 
            self.iterations,"",
            ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.start_random_search())),"",
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.trainer_main_menu()))
        )

        logs = [log for log in os.listdir(self.pp.state.input_logs_path)]
        logs = logs[:min(len(logs),4 )] #: to not overflow the terminal

        right_container = ptg.Container(
            f"[underline]First {len(logs)} logs in project:", *logs
        ).center()
        window = ptg.Window(ptg.Splitter(container, right_container), width = self.pp.window_width)
        #window = ptg.Window(*container)
        window.center()
        return window

set_training_params()

Sets the training parameters for the model.

This method allows the user to either start the training with the displayed default parameters or adapt the parameters according to their own preference.

Side Effects
  • The modified parameters are stored in a container and then the training function is called.
  • Parameters are displayed in the window.
  • A second window is displayed to show the logs contained in this project as a visual aid.
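
One detail worth keeping in mind: the input fields hold strings, and the values are forwarded to the server as such. The sketch below only illustrates the kind of casts the numeric defaults would eventually need; the actual conversion, if any, is not performed in this window and is shown here as an assumption:

# Illustrative sketch only: the default field values as strings and the
# casts they would typically require before numeric use.
raw = {"seq_len": "10", "emb_dim": "32", "hid_dim": "32", "mlp_dim": "16",
       "epochs": "10", "batch_size": "1024", "lr": "1e-3", "split": "0.9"}

typed = {key: (float(val) if key in ("lr", "split") else int(val))
         for key, val in raw.items()}
print(typed["lr"], typed["batch_size"])  # 0.001 1024
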
Source code in CLI/ProcessProphetTrain.py
def set_training_params(self):
    """
    Sets the training parameters for the model.

    This method allows the user to either start the training with the displayed default parameters or adapt the parameters
    according to their own preference.

    Side Effects:
        - The modified parameters are stored in a container and then the training function is called.
        - Parameters are displayed in the window.
        - A second window is displayed to show the logs contained in this project as a visual aid.
    """
    self.cuda = ptg.InputField("True", prompt="use cuda: ")
    self.model_name = ptg.InputField("f.pt", prompt="model name: ")
    self.seq_len = ptg.InputField("10", prompt="sequence length: ")
    self.emb_dim = ptg.InputField("32", prompt="embedding dimension: ")
    self.hid_dim = ptg.InputField("32", prompt="hidden dimension: ")
    self.mlp_dim = ptg.InputField("16", prompt="mlp dimension: ")
    self.epochs = ptg.InputField("10", prompt="number of epochs: ")
    self.batch_size = ptg.InputField("1024", prompt="batch size: ")
    self.lr = ptg.InputField("1e-3", prompt="learning rate: ")
    self.split = ptg.InputField("0.9", prompt="split fraction: ")
    self.log_name = ptg.InputField("Hospital_log.xes", prompt="log name: ")
    self.case_id_key = ptg.InputField("case:concept:name", prompt="case id key: ")
    self.case_activity_key = ptg.InputField("concept:name", prompt="activity key: ")
    self.case_timestamp_key = ptg.InputField("time:timestamp", prompt="timestamp key: ")

    # Contains the form for setting the parameters
    left_container = ptg.Container(
        ptg.Label(f"Set parameters for training"),
        SERVER_NAME,
        self.cuda,
        self.model_name,
        self.seq_len,
        self.emb_dim,
        self.hid_dim,
        self.mlp_dim,
        self.lr,
        self.batch_size,
        self.epochs,
        self.split,
        self.log_name,
        self.case_id_key,
        self.case_activity_key,
        self.case_timestamp_key,
        "",
        ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.start_training())),
        "",
        ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.trainer_main_menu()))
    )

    logs = [log for log in os.listdir(self.pp.state.input_logs_path)]
    logs = logs[:min(len(logs), 4)]  # To not overflow the terminal

    # Contains a list of the logs contained in the input logs path
    right_container = ptg.Container(
        f"[underline]First {len(logs)} logs in project:", *logs
    ).center()

    window = ptg.Window(ptg.Splitter(left_container, right_container), width=self.pp.window_width)
    window.center()
    return window

start_grid_search()

sends a request to the server with all the parameters needed for grid search training; if the server computes the request successfully, the accuracy of the trained model is displayed in a new window. It is then possible to return to the action menu (manager selection) or to the training menu.

if the request fails, e.g. because it exceeds the TIMEOUT limit, the error is displayed in a new window and the user can go back to the window where the parameters are displayed.
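
With the default field values, the search_params part of the request body would look roughly like this (the values stay strings because they are taken directly from the input fields); the "acc" key of a successful response is what gets displayed as accuracy:

# Example payload fragment for /grid_search built from the default inputs;
# each hyperparameter entry is [lower, upper, step], still as strings.
search_params = {
    "hid_dim": ["100", "200", "50"],
    "mlp_dim": ["100", "200", "100"],
    "emb_dim": ["100", "200", "100"],
}
# A successful response is JSON containing "acc", the reported accuracy.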

Source code in CLI/ProcessProphetTrain.py
def start_grid_search(self):
    """
    sends a request to the server with all the needed parameters to carry out grid search training
    and in case of a successful computation of the request by the server the accuracy of the trained
    model is displayed in a new window. It is then possible to return to the action (manager selection) or training menu. 

    if the request fails, e.g. because it exceeds the TIMEOUT limit, the error is displayed in a new window and 
    the user can go back to the window where the parameters are displayed
    """ 
    self.loading("preprocessing data...")

    input_logs_path= self.pp.state.input_logs_path

    #: checks if the file extension is xes. 
    is_xes = True if self.log_name.value[-3:] == "xes"  else False

    #: casting bool("False") also returns True 
    cuda= True if  self.cuda.value== "True"  else False

    #: search params for grid search
    sp = {
        "hid_dim":[self.hid_dim_lower.value,self.hid_dim_upper.value, self.hid_dim_step.value] ,
        "mlp_dim":[self.mlp_dim_lower.value, self.mlp_dim_upper.value, self.mlp_dim_step.value] ,
        "emb_dim":[self.emb_dim_lower.value, self.emb_dim_upper.value, self.emb_dim_step.value] 
    } 


    params = {
        "path_to_log": f"{input_logs_path}/{self.log_name.value}" , 
        "split": self.split.value, 
        "model_path": f"{self.pp.state.models_path}/{self.model_name.value}", 
        "case_id": self.case_id_key.value, 
        "activity_key":  self.case_activity_key.value, 
        "timestamp_key":  self.case_timestamp_key.value, 
        "cuda": cuda, 
        "seq_len": self.seq_len.value, 
        "lr": self.lr.value, 
        "batch_size": self.batch_size.value, 
        "epochs": self.epochs.value, 
        "is_xes": is_xes,
        "search_params": sp
    } 

    response = requests.post(
        f"http://{SERVER_NAME}:{SERVER_PORT}/grid_search", 
        json= params,
        timeout =TIMEOUT
    )
    if response.status_code == 200: 
        data = response.json()

        accuracy = data["acc"]
        #: display accuracy on success
        container =[
            "training successful", 
            f"accuracy: {accuracy}", 
            ptg.Button(f"{self.pp.button_color}training menu", lambda *_: self.pp.switch_window(self.trainer_main_menu())), 
            ptg.Button(f"{self.pp.button_color}action menu", lambda *_:  self.return_to_menu())
        ]
    else: 
        #: display error on fail
        data =response.json()
        error = data["error"]
        container =[  
            "training FAILED:", 
            "",
            f"{error}", 
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.set_grid_search_params()))
        ]
    window = ptg.Window(*container, box="DOUBLE")
    window.center()
    return window

start_random_search()

sends a request to the server with all the parameters needed for random search training; if the server computes the request successfully, the accuracy of the trained model is displayed in a new window. It is then possible to return to the action menu or to the training menu.

if the request fails, e.g. because it exceeds the TIMEOUT limit, the error is displayed in a new window and the user can go back to the window where the parameters are displayed.
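
For comparison with grid search, the random search request differs in two ways visible in the code below: each hyperparameter carries only [lower, upper] bounds (no step), and an extra iterations value is sent:

# Example payload fragment for /random_search built from the default inputs;
# bounds only, no step, plus the iteration count as a separate parameter.
search_params = {
    "hid_dim": ["100", "200"],
    "mlp_dim": ["100", "200"],
    "emb_dim": ["100", "200"],
}
iterations = "2"  # taken verbatim from its input field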

Source code in CLI/ProcessProphetTrain.py
def start_random_search(self) :
    """
    sends a request to the server with all the needed parameters to do a random search training
    and in case of a successful computation of the request by the server the accuracy of the trained
    model is displayed in a new window. It is then possible to return to the action or training menu

    if the request fails, e.g. because it exceeds the TIMEOUT limit, the error is displayed in a new window and 
    the user can go back to the window where the parameters are displayed
    """ 
    self.loading("preprocessing data...") #: shows the loading screen until the response is received. 

    input_logs_path= self.pp.state.input_logs_path

    is_xes = True if self.log_name.value[-3:] == "xes"  else False

    cuda= True if  self.cuda.value== "True"  else False

    #: params for random search. here we only need lower and upper bounds
    sp = {
        "hid_dim":[self.hid_dim_lower.value,self.hid_dim_upper.value] ,
        "mlp_dim":[self.mlp_dim_lower.value, self.mlp_dim_upper.value] ,
        "emb_dim":[self.emb_dim_lower.value, self.emb_dim_upper.value] 
    } 

    #: note the iterations param needed for random search.
    params = {
        "path_to_log": f"{input_logs_path}/{self.log_name.value}" , 
        "split": self.split.value, 
        "case_id": self.case_id_key.value, 
        "activity_key":  self.case_activity_key.value, 
        "timestamp_key":  self.case_timestamp_key.value, 
        "cuda": cuda, 
        "model_path": f"{self.pp.state.models_path}/{self.model_name.value}", 
        "seq_len": self.seq_len.value, 
        "lr": self.lr.value, 
        "batch_size": self.batch_size.value, 
        "epochs": self.epochs.value, 
        "is_xes": is_xes,
        "search_params": sp, 
        "iterations": self.iterations.value
    } 

    response = requests.post(
        f"http://{SERVER_NAME}:{SERVER_PORT}/random_search", 
        json= params,
        timeout =TIMEOUT
    )
    if response.status_code == 200: 
        logger_set_params_cli.debug(response.content)
        data = response.json()

        accuracy = data["acc"]

        #: display accuracy on success
        container = [ 
            "training successful", 
            f"accuracy: {accuracy}", 
            ptg.Button(f"{self.pp.button_color}training menu", lambda *_: self.pp.switch_window(self.trainer_main_menu())), 
            ptg.Button(f"{self.pp.button_color}action menu", lambda *_:  self.return_to_menu())
        ]
    else: 
        data = response.json()
        error = data["error"]
        container = [ 
            "training FAILED:", 
            "",
            f"{error}", 
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.set_random_search_params()))
        ]
    window = ptg.Window(*container, box="DOUBLE")
    window.center()
    return window

start_training()

Carries out a training request.

Side effects on success
  • model: A model .pt file is saved in the models folder.
  • config: A config.json file with the model's configuration information for the server is saved in the models folder.

The training statistics (time error, accuracy, recall, f1 score) are displayed on the screen.

If the training is unsuccessful, the error returned by the server is displayed on the CLI.
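
On success, the JSON response carries a training_statistics object; the snippet below shows the keys this window reads from it, with placeholder values that are not real results:

# Shape of a successful /train_nn response as consumed by this window.
# The numbers are placeholders for illustration, not real measurements.
data = {
    "training_statistics": {
        "time error": 0.12,
        "acc": 0.85,
        "recall": 0.80,
        "f1": 0.82,
    }
}
statistics = data["training_statistics"]
print(statistics["acc"], statistics["recall"], statistics["f1"])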

Source code in CLI/ProcessProphetTrain.py
def start_training(self) : 
    """
    Carries out a training request.

    Side effects on success:
        - `model`: A model `.pt` file is saved in the models folder.
        - `config`: A model's `config.json` information for the server is saved in the models folder as a JSON file.

    The training statistics (time error, accuracy, recall, f1 score) are displayed on the screen.

    If the training is unsuccessful, the error returned by the server is displayed on the CLI.
    """
    self.loading("preprocessing data...")
    input_logs_path= self.pp.state.input_logs_path

    is_xes = True if self.log_name.value[-3:] == "xes"  else False
    cuda= True if  self.cuda.value== "True"  else False

    params = {
        "path_to_log": f"{input_logs_path}/{self.log_name.value}" , 
        "split": self.split.value, 
        "model_path": f"{self.pp.state.models_path}/{self.model_name.value}", 
        "case_id": self.case_id_key.value, 
        "activity_key":  self.case_activity_key.value, 
        "timestamp_key":  self.case_timestamp_key.value, 
        "cuda": cuda, 
        "seq_len": self.seq_len.value, 
        "emb_dim": self.emb_dim.value, 
        "hid_dim":self.hid_dim.value, 
        "mlp_dim":self.mlp_dim.value, 
        "lr": self.lr.value, 
        "batch_size": self.batch_size.value, 
        "epochs": self.epochs.value, 
        "is_xes": is_xes
    } 

    response = requests.post(
        f"http://{SERVER_NAME}:{SERVER_PORT}/train_nn", 
        json= params,
        timeout =TIMEOUT
    )
    if response.status_code == 200: 
        data = response.json()

        statistics = data["training_statistics"]


        container =[  
            "training successful", 
            f"time error: {statistics['time error']}", 
            f"accuracy: {statistics['acc']}", 
            f"recall: {statistics['recall']}", 
            f"f1-score: {statistics['f1']}", 
            ptg.Button(f"{self.pp.button_color}training menu", lambda *_: self.pp.switch_window(self.trainer_main_menu())), 
            ptg.Button(f"{self.pp.button_color}action menu", lambda *_:  self.return_to_menu())
        ]
    else: 
        data = response.json()
        error = data["error"]
        container = [ 
            "training FAILED:",
            "",
            f"{error}", 
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.set_training_params()))
        ]
    window = ptg.Window(*container, box="DOUBLE")
    window.center()
    return window

trainer_main_menu()

this function displays the main menu for the trainer manager.

depending on the mode the current project is running in, the user can choose a training alternative and will be redirected to a new window where the parameters for the chosen alternative are displayed.

it is also possible to return to the previous menu.

Source code in CLI/ProcessProphetTrain.py
def trainer_main_menu(self) :
    """
    this function displays the main menu for the trainer manager. 

    depending on the mode the current project is running in, the user can choose a training alternative
    and will be redirected to a new window where the parameters for the chosen alternative are displayed.

    it is also possible to return to the previous menu.
    """ 
    if self.pp.mode == ProcessProphetMode.advanced: 
        #: the set params manually option is only available in the advanced mode
        container = ptg.Container(
            "select one training alternative", 
            "", 
            ptg.Button(f"{self.pp.button_color}set params manually", lambda *_: self.pp.switch_window(self.set_training_params())), 
            "",
            ptg.Button(f"{self.pp.button_color}grid search", lambda *_: self.pp.switch_window(self.set_grid_search_params())), 
            "",
            ptg.Button(f"{self.pp.button_color}random search", lambda *_: self.pp.switch_window(self.set_random_search_params())),
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.return_to_menu())
        )

        window = ptg.Window(*container, box="DOUBLE")
        window.center()

        return window

    elif self.pp.mode == ProcessProphetMode.quick: 
        #: only grid search and random search are available in this mode.
        container = ptg.Container(
            "select one training alternative", 
            "",
            ptg.Button(f"{self.pp.button_color}grid search", lambda *_: self.pp.switch_window(self.set_grid_search_params())), 
            "",
            ptg.Button(f"{self.pp.button_color}random search", lambda *_: self.pp.switch_window(self.set_random_search_params())),
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.return_to_menu())
        )

        window = ptg.Window(*container, box="DOUBLE")
        window.center()

        return window

This module allows prediction generation.

ProcessProphetPredict

Source code in CLI/ProcessProphetPredict.py
class ProcessProphetPredict: 
    def __init__(self, pp):
        """
        Initializes a ProcessProphetPredict instance and prediction main menu.

        Args:
            pp (ProcessProphet): The ProcessProphet instance in charge of window management.
        """
        self.pp = pp
        self.pp.switch_window(self.prediction_main_menu())

    def prediction_main_menu(self) :
        """
        menu that returns the window of the selected prediction
        """ 
        container = ptg.Container(
            "select a prediction generation method", 
            "", 
            ptg.Button(f"{self.pp.button_color}single prediction", lambda *_: self.pp.switch_window(self.set_single_prediction_params())), 
            "",
            ptg.Button(f"{self.pp.button_color}multiple prediction", lambda *_: self.pp.switch_window(self.set_multiple_prediction_params())), 
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.return_to_menu()), 
        )

        window = ptg.Window(container, box="DOUBLE")
        window.center()
        return window

    def loading(self, message = ""): 
        """
        a loading screen 
        """
        container = ptg.Container(
            "Loading...", 
            message
        )
        window = ptg.Window(container, box="DOUBLE")
        window.center()
        self.pp.switch_window(window)


    def get_single_prediction(self) : 
        """
        carries out a single prediction request. 

        Side effects: 
            - marker, timestamp and the probability of the single prediction are displayed
        """
        self.loading("predicting next event...")
        input_logs_path= self.pp.state.partial_traces_path
        # checks if the file extension is xes
        is_xes = True if self.log_name.value[-3:] == "xes"  else False

        #: parameters that are passed to the server for further computations
        params = {
            "path_to_log": f"{input_logs_path}/{self.log_name.value}" ,  
            "path_to_model": f"{self.pp.state.models_path}/{self.model_name.value}", 
            "case_id": self.case_id_key.value, 
            "activity_key":  self.case_activity_key.value, 
            "timestamp_key":  self.case_timestamp_key.value,  
            "is_xes": is_xes,
            "config": f"{self.pp.state.models_path}/{self.model_name.value[:-3]}.config.json",
        }


        response = requests.post(
            f"http://{SERVER_NAME}:{SERVER_PORT}/single_prediction", 
            json= params,
            timeout =TIMEOUT
        )
        if response.status_code == 200: 
            data = response.json()

            statistics = data

            #: container to display the computed event, time and its probability
            container = ptg.Container(
                "single prediction successful", 
                f"predicted time: {statistics['predicted_time']}", 
                f"predicted event: {statistics['predicted_event']}",
                f"probability: {statistics['probability']}%",
                "",
                ptg.Button(f"{self.pp.button_color}back", lambda *_:self.pp.switch_window(self.prediction_main_menu())), 
                "",
                ptg.Button(f"{self.pp.button_color}return to menu", lambda *_:self.return_to_menu()), 
            )
        else: 
            data = response.json()
            error = data["error"]
            #: container to display that an error occurred in the request
            container = ptg.Container(
                "single prediction FAILED:",
                "",
                f"{error}", 
                "",
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.prediction_main_menu()))
            )
        window = ptg.Window(container, box="DOUBLE")
        window.center()
        return window

    def set_single_prediction_params(self):
        """
        user can modify the given parameters for a single prediction which are then stored in the container and also displayed in the current window

        user can also start the prediction with the continue button or return to the previous menu with the back button
        """


        self.model_name=  ptg.InputField("f.pt", prompt="model name: ")
        self.log_name=  ptg.InputField("partial_input.csv", prompt="log name: ")
        self.case_id_key=  ptg.InputField("case:concept:name", prompt="case id key: ")
        self.case_activity_key=  ptg.InputField("concept:name", prompt="activity key: ")
        self.case_timestamp_key=  ptg.InputField("time:timestamp", prompt="timestamp key: ")

        container = [
            ptg.Label(f"set parameters for prediction"), 
            self.model_name ,
            self.log_name,
            self.case_id_key, 
            self.case_activity_key, 
            self.case_timestamp_key,
            ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.get_single_prediction())),
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.prediction_main_menu()))
            ]
        # display the container with the parameters
        window = ptg.Window(*container)
        window.center()
        return window

    def get_multiple_prediction(self) : 
        """
        Carries out a multiple prediction request.

        Side effects:
            - Markers and timestamps of the multiple prediction are displayed in a separate file.
        """
        self.loading("predicting next event...")
        input_logs_path= self.pp.state.partial_traces_path
        #: checks if the file extension is xes
        is_xes = True if self.log_name.value[-3:] == "xes"  else False
        #: parameters that are passed to the server for further computations
        params = {
            "path_to_log": f"{input_logs_path}/{self.log_name.value}" ,  
            "path_to_model": f"{self.pp.state.models_path}/{self.model_name.value}", 
            "case_id": self.case_id_key.value, 
            "activity_key":  self.case_activity_key.value, 
            "timestamp_key":  self.case_timestamp_key.value,  
            "is_xes": is_xes,
            "config": f"{self.pp.state.models_path}/{self.model_name.value[:-3]}.config.json",
            "depth": self.depth.value,
            "degree": self.degree.value, 
            "prediction_file_name": f"{self.pp.state.multiple_predictions_path}/{self.prediction_file_name_input.value}"
        }

        response = requests.post(
            f"http://{SERVER_NAME}:{SERVER_PORT}/multiple_prediction", 
            json= params,
            timeout =8000
        )
        if response.status_code == 200: 
            data = response.json()

            paths = data
            #: container that indicates success of the request and shows the filename where the predictions are stored 
            container = ptg.Container(
                f"Multiple predictions stored in {params['prediction_file_name']}", 
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.prediction_main_menu()))
            )
        else: 
            data = response.json()
            error = data["error"]
            #: container that indicates an error that occurred in the request
            container = ptg.Container(
                "multiple prediction FAILED:",
                "",
                f"{error}", 
                "",
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.prediction_main_menu()))
            )
        window = ptg.Window(container, box="DOUBLE")
        window.center()
        return window

    def set_multiple_prediction_params(self):
        """
        function to display the default values for a multiple prediction and grants the user access to 
        modify the given parameters for multiple predictions which are then stored in the container and also displayed in the current window

        user can also start the prediction with the continue button or return to the previous menu with the back button
        """
        self.model_name=  ptg.InputField("f.pt", prompt="model name: ")
        self.log_name=  ptg.InputField("partial_input.csv", prompt="log name: ")
        self.case_id_key=  ptg.InputField("case:concept:name", prompt="case id key: ")
        self.case_activity_key=  ptg.InputField("concept:name", prompt="activity key: ")
        self.case_timestamp_key=  ptg.InputField("time:timestamp", prompt="timestamp key: ")
        self.depth= ptg.InputField("5", prompt="depth: ") #: amount of following events that should be predicted
        self.degree= ptg.InputField("3", prompt="degree: ") #: pick the k most likely ones.
        self.prediction_file_name_input = ptg.InputField("mp1.json", prompt= "predictions file name: ")

        container = [
            ptg.Label(f"set parameters for prediction"), 
            self.model_name ,
            self.log_name,
            self.prediction_file_name_input,
            self.case_id_key, 
            self.case_activity_key, 
            self.case_timestamp_key,
            self.depth,
            self.degree,
            ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.get_multiple_prediction())), 
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.prediction_main_menu()))
            ]
        #: display the container with the parameters
        window = ptg.Window(*container)
        window.center()
        return window

    def return_to_menu(self):
        """
        returns to p.p. start
        """
        pp_start = ProcessProphetStart.ProcessProphetStart(self.pp, False)

__init__(pp)

Initializes a ProcessProphetPredict instance and prediction main menu.

Parameters:

  • pp (ProcessProphet), required: The ProcessProphet instance in charge of window management.
Source code in CLI/ProcessProphetPredict.py
def __init__(self, pp):
    """
    Initializes a ProcessProphetPredict instance and prediction main menu.

    Args:
        pp (ProcessProphet): The ProcessProphet instance in charge of window management.
    """
    self.pp = pp
    self.pp.switch_window(self.prediction_main_menu())

get_multiple_prediction()

Carries out a multiple prediction request.

Side effects
  • Markers and timestamps of the multiple prediction are written to a separate file (a sketch of loading that file follows below).
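
The predictions end up in the file named in the request (by default mp1.json inside the project's multiple predictions folder). The sketch below shows a minimal way to inspect that file afterwards; the path is a hypothetical example and nothing is assumed about the file's internal structure:

import json

# Illustrative sketch only: open the predictions file written by the server.
# The path is a hypothetical example based on the default file name.
with open("projects/my_project/multiple_predictions/mp1.json") as f:
    predictions = json.load(f)
print(type(predictions))  # the exact structure depends on the server output
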
Source code in CLI/ProcessProphetPredict.py
def get_multiple_prediction(self) : 
    """
    Carries out a multiple prediction request.

    Side effects:
        - Markers and timestamps of the multiple prediction are displayed in a separate file.
    """
    self.loading("predicting next event...")
    input_logs_path= self.pp.state.partial_traces_path
    #: checks if the file extension is xes
    is_xes = True if self.log_name.value[-3:] == "xes"  else False
    #: parameters that are passed to the server for further computations
    params = {
        "path_to_log": f"{input_logs_path}/{self.log_name.value}" ,  
        "path_to_model": f"{self.pp.state.models_path}/{self.model_name.value}", 
        "case_id": self.case_id_key.value, 
        "activity_key":  self.case_activity_key.value, 
        "timestamp_key":  self.case_timestamp_key.value,  
        "is_xes": is_xes,
        "config": f"{self.pp.state.models_path}/{self.model_name.value[:-3]}.config.json",
        "depth": self.depth.value,
        "degree": self.degree.value, 
        "prediction_file_name": f"{self.pp.state.multiple_predictions_path}/{self.prediction_file_name_input.value}"
    }

    response = requests.post(
        f"http://{SERVER_NAME}:{SERVER_PORT}/multiple_prediction", 
        json= params,
        timeout =8000
    )
    if response.status_code == 200: 
        data = response.json()

        paths = data
        #: container that indicates success of the request and shows the filename where the predictions are stored 
        container = ptg.Container(
            f"Multiple predictions stored in {params['prediction_file_name']}", 
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.prediction_main_menu()))
        )
    else: 
        data = response.json()
        error = data["error"]
        #: container that indicates an error that occurred in the request
        container = ptg.Container(
            "multiple prediction FAILED:",
            "",
            f"{error}", 
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.prediction_main_menu()))
        )
    window = ptg.Window(container, box="DOUBLE")
    window.center()
    return window

get_single_prediction()

carries out a single prediction request.

Side effects
  • marker, timestamp and the probability of the single prediction are displayed (the response shape is sketched below)
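
The success window reads three fields from the JSON response; the sketch below shows that shape with placeholder values only:

# Shape of a successful /single_prediction response as consumed by this
# window. The values are placeholders for illustration, not real output.
data = {
    "predicted_time": "12:30:00",
    "predicted_event": "register request",
    "probability": 87.5,
}
print(f"predicted event: {data['predicted_event']} ({data['probability']}%)")
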
Source code in CLI/ProcessProphetPredict.py
def get_single_prediction(self) : 
    """
    carries out a single prediction request. 

    Side effects: 
        - marker, timestamp and the probability of the single prediction are displayed
    """
    self.loading("predicting next event...")
    input_logs_path= self.pp.state.partial_traces_path
    # checks if the file extension is xes
    is_xes = True if self.log_name.value[-3:] == "xes"  else False

    #: parameters that are passed to the server for further computations
    params = {
        "path_to_log": f"{input_logs_path}/{self.log_name.value}" ,  
        "path_to_model": f"{self.pp.state.models_path}/{self.model_name.value}", 
        "case_id": self.case_id_key.value, 
        "activity_key":  self.case_activity_key.value, 
        "timestamp_key":  self.case_timestamp_key.value,  
        "is_xes": is_xes,
        "config": f"{self.pp.state.models_path}/{self.model_name.value[:-3]}.config.json",
    }


    response = requests.post(
        f"http://{SERVER_NAME}:{SERVER_PORT}/single_prediction", 
        json= params,
        timeout =TIMEOUT
    )
    if response.status_code == 200: 
        data = response.json()

        statistics = data

        #: container to display the computed event, time and its probability
        container = ptg.Container(
            "single prediction successful", 
            f"predicted time: {statistics['predicted_time']}", 
            f"predicted event: {statistics['predicted_event']}",
            f"probability: {statistics['probability']}%",
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_:self.pp.switch_window(self.prediction_main_menu())), 
            "",
            ptg.Button(f"{self.pp.button_color}return to menu", lambda *_:self.return_to_menu()), 
        )
    else: 
        data = response.json()
        error = data["error"]
        #: container to display that an error occurred in the request
        container = ptg.Container(
            "single prediction FAILED:",
            "",
            f"{error}", 
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.prediction_main_menu()))
        )
    window = ptg.Window(container, box="DOUBLE")
    window.center()
    return window

loading(message='')

a loading screen

Source code in CLI/ProcessProphetPredict.py
def loading(self, message = ""): 
    """
    a loading screen 
    """
    container = ptg.Container(
        "Loading...", 
        message
    )
    window = ptg.Window(container, box="DOUBLE")
    window.center()
    self.pp.switch_window(window)

prediction_main_menu()

menu that returns the window of the selected prediction

Source code in CLI/ProcessProphetPredict.py
def prediction_main_menu(self) :
    """
    menu that returns the window of the selected prediction
    """ 
    container = ptg.Container(
        "select a prediction generation method", 
        "", 
        ptg.Button(f"{self.pp.button_color}single prediction", lambda *_: self.pp.switch_window(self.set_single_prediction_params())), 
        "",
        ptg.Button(f"{self.pp.button_color}multiple prediction", lambda *_: self.pp.switch_window(self.set_multiple_prediction_params())), 
        "",
        ptg.Button(f"{self.pp.button_color}back", lambda *_: self.return_to_menu()), 
    )

    window = ptg.Window(container, box="DOUBLE")
    window.center()
    return window

return_to_menu()

returns to p.p. start

Source code in CLI/ProcessProphetPredict.py
def return_to_menu(self):
    """
    returns to p.p. start
    """
    pp_start = ProcessProphetStart.ProcessProphetStart(self.pp, False)

set_multiple_prediction_params()

displays the default values for a multiple prediction and lets the user modify the given parameters, which are then stored in the container and also displayed in the current window

user can also start the prediction with the continue button or return to the previous menu with the back button

Source code in CLI/ProcessProphetPredict.py
def set_multiple_prediction_params(self):
    """
    function to display the default values for a multiple prediction and grants the user access to 
    modify the given parameters for multiple predictions which are then stored in the container and also displayed in the current window

    user can also start the prediction with the continue button or return to the previous menu with the back button
    """
    self.model_name=  ptg.InputField("f.pt", prompt="model name: ")
    self.log_name=  ptg.InputField("partial_input.csv", prompt="log name: ")
    self.case_id_key=  ptg.InputField("case:concept:name", prompt="case id key: ")
    self.case_activity_key=  ptg.InputField("concept:name", prompt="activity key: ")
    self.case_timestamp_key=  ptg.InputField("time:timestamp", prompt="timestamp key: ")
    self.depth= ptg.InputField("5", prompt="depth: ") #: amount of following events that should be predicted
    self.degree= ptg.InputField("3", prompt="degree: ") #: pick the k most likely ones.
    self.prediction_file_name_input = ptg.InputField("mp1.json", prompt= "predictions file name: ")

    container = [
        ptg.Label(f"set parameters for prediction"), 
        self.model_name ,
        self.log_name,
        self.prediction_file_name_input,
        self.case_id_key, 
        self.case_activity_key, 
        self.case_timestamp_key,
        self.depth,
        self.degree,
        ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.get_multiple_prediction())), 
        ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.prediction_main_menu()))
        ]
    #: display the container with the parameters
    window = ptg.Window(*container)
    window.center()
    return window

set_single_prediction_params()

user can modify the given parameters for a single prediction which are then stored in the container and also displayed in the current window

user can also start the prediction with the continue button or return to the previous menu with the back button

Source code in CLI/ProcessProphetPredict.py, lines 118–145
def set_single_prediction_params(self):
    """
    user can modify the given parameters for a single prediction which are then stored in the container and also displayed in the current window

    user can also start the prediction with the continue button or return to the previous menu with the back button
    """


    self.model_name=  ptg.InputField("f.pt", prompt="model name: ")
    self.log_name=  ptg.InputField("partial_input.csv", prompt="log name: ")
    self.case_id_key=  ptg.InputField("case:concept:name", prompt="case id key: ")
    self.case_activity_key=  ptg.InputField("concept:name", prompt="activity key: ")
    self.case_timestamp_key=  ptg.InputField("time:timestamp", prompt="timestamp key: ")

    container = [
        ptg.Label(f"set parameters for prediction"), 
        self.model_name ,
        self.log_name,
        self.case_id_key, 
        self.case_activity_key, 
        self.case_timestamp_key,
        ptg.Button(f"{self.pp.button_color}continue", lambda *_: self.pp.switch_window(self.get_single_prediction())),
        ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.prediction_main_menu()))
        ]
    # display the container with the parameters
    window = ptg.Window(*container)
    window.center()
    return window

This module allows conformance checking, predictive log generation and process mining.
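
A minimal usage sketch, assuming `pp` is the ProcessProphet window-manager instance and that the import paths below match the CLI package layout (both are assumptions): constructing the class is enough to open its menu, because `__init__` immediately switches to `model_main_menu`.

from CLI import ProcessProphet, ProcessProphetModel  # import paths are assumptions

pp = ProcessProphet.ProcessProphet()                  # window-manager singleton
ProcessProphetModel.ProcessProphetModel(pp)           # switches the terminal to the model main menu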

ProcessProphetModel

Source code in CLI/ProcessProphetModel.py, lines 18–393
class ProcessProphetModel:

    def __init__(self, pp):
        """
        Initialize ProcessProphet instance and model main menu.

        Args:
            pp (ProcessProphet): The ProcessProphet instance in charge of window management.

        Other state parameters that make sense in the context of conformance checking might also be saved here.
        """
        self.pp = pp
        self.pp.switch_window(self.model_main_menu())


    def return_to_menu(self):
        """
        returns to p.p. start
        """
        pp_start = ProcessProphetStart.ProcessProphetStart(self.pp, False)

    def loading(self, message = ""): 
        """
        a loading screen 
        """
        container = ptg.Container(
            "Loading...", 
            message
        )
        window = ptg.Window(*container, box="DOUBLE")
        window.center()
        self.pp.switch_window(window)

    def model_main_menu(self):
        """
        menu to select one of the process mining, conformance checking, creation of a predictive log
        or go back to the previous menu
        """
        #: container to indicate the different options of this part of the CLI
        container = ptg.Container(
            "Select one action", 
            "", 
            ptg.Button(f"{self.pp.button_color}Create a predictive event log", lambda *_: self.pp.switch_window(self.set_predictive_log())), 
            "", 
            ptg.Button(f"{self.pp.button_color}Run process mining", lambda *_: self.pp.switch_window(self.set_process_mining())), 
            "", 
            ptg.Button(f"{self.pp.button_color}Run conformance checking", lambda *_: self.pp.switch_window(self.set_conformance_checking())), 
            "",
            ptg.Button(f"{self.pp.button_color}Back", lambda *_: self.return_to_menu()), 
        )

        window = ptg.Window(container, box="DOUBLE")
        window.center()
        return window 

    def get_predictive_log(self): 
        """
        Sends a request to create a predictive log to the server with the previously confirmed
        parameters.

        Side effects:
            - The predictive log that the RNN computed is stored in the predictive_logs directory of the current project,
            and the user can return to previous menus.
            - If unsuccessful, an error is indicated and the user can return to the model menu.
        """
        self.loading("preprocessing data...")
        #: check the file type of the input log
        is_xes = True if self.log_name.value[-3:] == "xes"  else False

        params = {
            "path_to_model":f"{self.pp.state.models_path}/{self.model_name.value}",
            "path_to_log":f"{self.pp.state.input_logs_path}/{self.log_name.value}" ,
            "case_id":self.case_id_key.value,
            "activity_key":self.case_activity_key.value,
            "timestamp_key":self.case_timestamp_key.value,
            "new_log_path":f"{self.pp.state.predictive_logs_path}/{self.predictive_event_log_name.value}",
            "non_stop":True if self.non_stop.value== "True" else False,
            "upper":self.upper .value,
            "random_cuts": True if self.random_cuts.value == "True" else False,
            "cut_length":self.cut_length.value,
            "config":f"{self.pp.state.models_path}/{self.model_name.value[:-3]}.config.json",
            "is_xes": is_xes, 
            "sep": self.sep.value
        }  

        response = requests.post(
            f"http://{SERVER_NAME}:{SERVER_PORT}/generate_predictive_log", 
            json= params,
            timeout =TIMEOUT
        )
        if response.status_code == 200:
            #: container to indicate successful generation of predictive log 
            data = response.json()
            container =[  
                "predictive process model generated successfully", 
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.model_main_menu())), 
                "",
                ptg.Button(f"{self.pp.button_color}action menu", lambda *_:  self.return_to_menu())
            ]
        else:
            #: container to indicate that an error occurred from the request to the server 
            data = response.json()
            error = data["error"]
            container = [ 
                "training FAILED:",
                "",
                f"{error}", 
                "",
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.model_main_menu()))
            ]
        window = ptg.Window(*container, box="DOUBLE")
        window.center()
        return window


    def set_predictive_log(self):
        """
        User can either start generating a predictive log with the displayed default parameters or alternatively adapt the parameters to their
        own preference.

        Side effects:
            - The modified parameters are stored in a container and then the function for creating a predictive log is called.
            - Parameters are displayed in the window.
        """
        if self.pp.mode== ProcessProphetMode.advanced:
            self.model_name=  ptg.InputField("f.pt", prompt="model name: ")
            self.log_name=  ptg.InputField("Hospital_log.xes", prompt="log name: ")
            self.case_id_key=  ptg.InputField("case:concept:name", prompt="case id key: ")
            self.case_activity_key=  ptg.InputField("concept:name", prompt="activity key: ")
            self.case_timestamp_key=  ptg.InputField("time:timestamp", prompt="timestamp key: ")
            self.predictive_event_log_name  = ptg.InputField("predicitive_log1.csv", prompt= "predictive log name: ")
            self.non_stop = ptg.InputField("True", prompt="run until end event: ")
            self.upper = ptg.InputField("30", prompt="non stop upper bound: ")
            self.random_cuts = ptg.InputField("True", prompt="use random cuts: ")
            self.cut_length = ptg.InputField("0", prompt="cut length: ")
            self.sep  = ptg.InputField(",", prompt= "csv separator: ")
            container = [
                "Enter the following params:",
                self.model_name,
                self.log_name,
                self.case_id_key,
                self.case_activity_key,
                self.case_timestamp_key,
                self.predictive_event_log_name,
                self.non_stop,
                self.upper,
                self.random_cuts,
                self.cut_length,
                self.sep, 
                ptg.Button(f"{self.pp.button_color}continue",lambda *_: self.pp.switch_window(self.get_predictive_log()) ),
                "",
                ptg.Button(f"{self.pp.button_color}back",lambda *_: self.pp.switch_window(self.model_main_menu()) )
            ]
            window = ptg.Window(*container, width = self.pp.window_width)
            window.center()
        elif self.pp.mode == ProcessProphetMode.quick:
            #: in this mode the random cuts option where 
            # cuts are done until the end is used by default. 
            self.model_name=  ptg.InputField("f.pt", prompt="model name: ")
            self.log_name=  ptg.InputField("Hospital_log.xes", prompt="log name: ")
            self.case_id_key=  ptg.InputField("case:concept:name", prompt="case id key: ")
            self.case_activity_key=  ptg.InputField("concept:name", prompt="activity key: ")
            self.case_timestamp_key=  ptg.InputField("time:timestamp", prompt="timestamp key: ")
            self.predictive_event_log_name  = ptg.InputField("predicitive_log4.csv", prompt= "predictive log name: ")
            self.non_stop = ptg.InputField("True", prompt="run until end event: ")
            self.upper = ptg.InputField("100", prompt="non stop upper bound: ")
            self.random_cuts = ptg.InputField("True", prompt="use random cuts: ")
            self.cut_length = ptg.InputField("0", prompt="cut length: ")
            self.sep  = ptg.InputField(",", prompt= "csv separator: ")
            container = [
                "Enter the following params:",
                self.model_name,
                self.log_name,
                self.case_id_key,
                self.case_activity_key,
                self.case_timestamp_key,
                self.predictive_event_log_name,
                self.sep, 
                ptg.Button(f"{self.pp.button_color}continue",lambda *_: self.pp.switch_window(self.get_predictive_log()) ),
                "",
                ptg.Button(f"{self.pp.button_color}back",lambda *_: self.pp.switch_window(self.model_main_menu()) )
            ]
            window = ptg.Window(*container, width = self.pp.window_width)
            window.center()
        return window


    def get_process_mining(self):
        """
        Sends a process mining request to the server with the previously confirmed parameters.

        Side effects:
            - The petri net that the mining algorithm computed is stored in the models directory of the current project,
            and the user can return to previous menus.
            - If unsuccessful, an error is indicated and the user can return to the model menu.
        """
        self.loading("preprocessing data...")
        params = {
            "path_to_log":f"{self.pp.state.predictive_logs_path}/{self.log_name.value}" ,
            "case_id":self.case_id_key.value,
            "activity_key":self.case_activity_key.value,
            "timestamp_key":self.case_timestamp_key.value,
            "config":f"{self.pp.state.models_path}/{self.model_name.value[:-3]}.config.json",
            "petri_net_path": f"{self.pp.state.petri_nets_path}/{self.petri_net_path.value}",
            "mining_algo_config":{
                "dependency_threshold":self.dependency_threshold.value,
                "and_threshold":self.and_threshold.value,
                "loop_two_threshold":self.loop_two_threshold.value,
                "noise_threshold":self.noise_threshold.value
            },
            "selected_model": self.mining_algorithm.value, 
            "sep": "," 
        }  

        response = requests.post(
            f"http://{SERVER_NAME}:{SERVER_PORT}/generate_predictive_process_model", 
            json= params,
            timeout =TIMEOUT
        )
        if response.status_code == 200: 
            data = response.json()
            #: container to indicate successful generation of predictive process model
            container =[  
                "predictive process model generated successfully", 
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.model_main_menu())), 
                "",
                ptg.Button(f"{self.pp.button_color}action menu", lambda *_:  self.return_to_menu())
            ]
        else: 
            data = response.json()
            error = data["error"]
            #: container to indicate that an error occurred from the request to the server
            container = [ 
                "training FAILED:",
                "",
                f"{error}", 
                "",
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.model_main_menu()))
            ]
        window = ptg.Window(*container, box="DOUBLE")
        window.center()
        return window


    def set_process_mining(self):
        """
        User can either start the mining with the displayed default parameters or alternatively adapt the parameters to their
        own preference (e.g. select different mining algorithm).

        Modes are not differentiated under this option.

        Side effects:
            - The modified parameters are stored in a container and then the mining function is called.
            - Parameters are displayed in the window.
        """
        self.model_name=  ptg.InputField("f.pt", prompt="model config: ")
        self.log_name=  ptg.InputField("predicitive_log1.csv", prompt="log name: ")
        self.case_id_key=  ptg.InputField("case:concept:name", prompt="case id key: ")
        self.case_activity_key=  ptg.InputField("concept:name", prompt="activity key: ")
        self.case_timestamp_key=  ptg.InputField("time:timestamp", prompt="timestamp key: ")

        self.petri_net_path= ptg.InputField("p_net1.pnml", prompt= "petri net path: ")
        self.mining_algorithm= ptg.InputField("heuristic_miner", prompt= "process discovery algorithm: ")
        self.dependency_threshold= ptg.InputField("0.5",prompt= "dependency threshold: ")
        self.and_threshold= ptg.InputField("0.65", prompt= "and threshold: ")
        self.loop_two_threshold= ptg.InputField("0.5", prompt= "loop two threshold: ")
        self.noise_threshold= ptg.InputField("0", prompt= "noise threshold: ")
        #: container to store and indicate all the needed parameters
        container = [
            "Enter the following params:",
            self.model_name,
            self.log_name,
            self.case_id_key,
            self.case_activity_key,
            self.case_timestamp_key,
            self.petri_net_path, 
            self.mining_algorithm, 
            self.dependency_threshold, 
            self.and_threshold, 
            self.loop_two_threshold, 
            self.noise_threshold, 
            ptg.Button(f"{self.pp.button_color}continue",lambda *_: self.pp.switch_window(self.get_process_mining()) ),
            "",
            ptg.Button(f"{self.pp.button_color}back",lambda *_: self.pp.switch_window(self.model_main_menu()) )
        ]
        window = ptg.Window(*container, width = self.pp.window_width)
        window.center()
        return window

    def get_conformance_checking(self):
        """
        Sends a conformance checking request to the server with the previously confirmed parameters.

        Side effects:
            - The fitness that the conformance checking algorithm computed is displayed and the user can return to previous menus.
            - If unsuccessful, an error is indicated and the user can return to the model menu.
        """
        self.loading("preprocessing data...")


        is_xes = True if self.log_name.value[-3:] == "xes"  else False
        params = {
            "path_to_log":f"{self.pp.state.input_logs_path}/{self.log_name.value}" ,
            "case_id":self.case_id_key.value,
            "activity_key":self.case_activity_key.value,
            "timestamp_key":self.case_timestamp_key.value,
            "petri_net_path": f"{self.pp.state.petri_nets_path}/{self.petri_net_path.value}",
            "conformance_technique":self.conformance_technique.value, 
            "is_xes": is_xes
        }  

        response = requests.post(
            f"http://{SERVER_NAME}:{SERVER_PORT}/conformance", 
            json= params,
            timeout =TIMEOUT
        )
        if response.status_code == 200: 
            data = response.json()
            #: container to indicate successful conformance checking and the computed fitness of the process model
            container =[  
                "conformance checking ready", 
                f"fitness: {data['fitness']}", 
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.model_main_menu())), 
                "",
                ptg.Button(f"{self.pp.button_color}action menu", lambda *_:  self.return_to_menu())
            ]
        else: 
            data = response.json()
            error = data["error"]
            #: container to indicate that an error occurred from the request to the server
            container = [ 
                "training FAILED:",
                "",
                f"{error}", 
                "",
                ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.model_main_menu()))
            ]
        window = ptg.Window(*container, box="DOUBLE")
        window.center()
        return window

    def set_conformance_checking(self):
        """
        User can either start the conformance checking with the displayed default parameters or alternatively adapt the parameters to their
        own preference (e.g. select different conformance checking algorithm).

        Modes are not differentiated for this option.

        Side effects:
            - The modified parameters are stored in a container and then the conformance checking function is called.
            - Parameters are displayed in the window.
        """
        self.case_id_key=  ptg.InputField("case:concept:name", prompt="case id key: ")
        self.case_activity_key=  ptg.InputField("concept:name", prompt="activity key: ")
        self.case_timestamp_key=  ptg.InputField("time:timestamp", prompt="timestamp key: ")

        self.log_name=  ptg.InputField("Hospital_log.xes", prompt="log name: ")
        self.petri_net_path= ptg.InputField("p_net1.pnml", prompt= "petri net path: ")

        self.conformance_technique= ptg.InputField("token",prompt= "conformance technique: ")
        #: container to store and indicate all the needed parameters
        container = [
            "Enter the following params:",
            self.log_name,
            self.case_id_key,
            self.case_activity_key,
            self.case_timestamp_key,
            self.petri_net_path, 
            self.conformance_technique,
            ptg.Button(f"{self.pp.button_color}continue",lambda *_: self.pp.switch_window(self.get_conformance_checking()) ),
                "",
            ptg.Button(f"{self.pp.button_color}back",lambda *_: self.pp.switch_window(self.model_main_menu()) )
        ]
        window = ptg.Window(*container, width = self.pp.window_width)
        window.center()
        return window

__init__(pp)

Initialize ProcessProphet instance and model main menu.

Parameters:

pp (ProcessProphet, required): The ProcessProphet instance in charge of window management.

Other state parameters that make sense in the context of conformance checking might also be saved here.

Source code in CLI/ProcessProphetModel.py, lines 20–30
def __init__(self, pp):
    """
    Initialize ProcessProphet instance and model main menu.

    Args:
        pp (ProcessProphet): The ProcessProphet instance in charge of window management.

    Other state parameters that make sense in the context of conformance checking might also be saved here.
    """
    self.pp = pp
    self.pp.switch_window(self.model_main_menu())

get_conformance_checking()

Sends a conformance checking request to the server with the previously confirmed parameters.

Side effects
  • The fitness that the conformance checking algorithm computed is displayed and the user can return to previous menus.
  • If unsuccessful, an error is indicated and the user can return to the model menu.
Source code in CLI/ProcessProphetModel.py, lines 307–357
def get_conformance_checking(self):
    """
    Sends a conformance checking request to the server with the previously confirmed parameters.

    Side effects:
        - The fitness that the conformance checking algorithm computed is displayed and the user can return to previous menus.
        - If unsuccessful, an error is indicated and the user can return to the model menu.
    """
    self.loading("preprocessing data...")


    is_xes = True if self.log_name.value[-3:] == "xes"  else False
    params = {
        "path_to_log":f"{self.pp.state.input_logs_path}/{self.log_name.value}" ,
        "case_id":self.case_id_key.value,
        "activity_key":self.case_activity_key.value,
        "timestamp_key":self.case_timestamp_key.value,
        "petri_net_path": f"{self.pp.state.petri_nets_path}/{self.petri_net_path.value}",
        "conformance_technique":self.conformance_technique.value, 
        "is_xes": is_xes
    }  

    response = requests.post(
        f"http://{SERVER_NAME}:{SERVER_PORT}/conformance", 
        json= params,
        timeout =TIMEOUT
    )
    if response.status_code == 200: 
        data = response.json()
        #: container to indicate successful conformance checking and the computed fitness of the process model
        container =[  
            "conformance checking ready", 
            f"fitness: {data['fitness']}", 
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.model_main_menu())), 
            "",
            ptg.Button(f"{self.pp.button_color}action menu", lambda *_:  self.return_to_menu())
        ]
    else: 
        data = response.json()
        error = data["error"]
        #: container to indicate that an error occurred from the request to the server
        container = [ 
            "training FAILED:",
            "",
            f"{error}", 
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.model_main_menu()))
        ]
    window = ptg.Window(*container, box="DOUBLE")
    window.center()
    return window
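
The same request can be reproduced outside the TUI. A hedged sketch, assuming a locally running backend (host, port and project paths are placeholders) and an already mined petri net; the field names and the `fitness`/`error` keys come from the code above:

import requests

params = {
    "path_to_log": "projects/my_project/input_logs/Hospital_log.xes",   # placeholder path
    "case_id": "case:concept:name",
    "activity_key": "concept:name",
    "timestamp_key": "time:timestamp",
    "petri_net_path": "projects/my_project/petri_nets/p_net1.pnml",     # placeholder path
    "conformance_technique": "token",
    "is_xes": True,
}
response = requests.post("http://localhost:8080/conformance", json=params, timeout=60)
if response.status_code == 200:
    print("fitness:", response.json()["fitness"])
else:
    print("error:", response.json()["error"])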

get_predictive_log()

Sends a request to the server to create a predictive log with the previously confirmed parameters.

Side effects
  • The predictive log that the RNN computed is stored in the predictive_logs directory of the current project, and the user can return to previous menus.
  • If unsuccessful, an error is indicated and the user can return to the model menu.
Source code in CLI/ProcessProphetModel.py, lines 73–130
def get_predictive_log(self): 
    """
    Sends a request to create a predictive log to the server with the previously confirmed
    parameters.

    Side effects:
        - The predictive log that the RNN computed is stored in the predictive_logs directory of the current project,
        and the user can return to previous menus.
        - If unsuccessful, an error is indicated and the user can return to the model menu.
    """
    self.loading("preprocessing data...")
    #: check the file type of the input log
    is_xes = True if self.log_name.value[-3:] == "xes"  else False

    params = {
        "path_to_model":f"{self.pp.state.models_path}/{self.model_name.value}",
        "path_to_log":f"{self.pp.state.input_logs_path}/{self.log_name.value}" ,
        "case_id":self.case_id_key.value,
        "activity_key":self.case_activity_key.value,
        "timestamp_key":self.case_timestamp_key.value,
        "new_log_path":f"{self.pp.state.predictive_logs_path}/{self.predictive_event_log_name.value}",
        "non_stop":True if self.non_stop.value== "True" else False,
        "upper":self.upper .value,
        "random_cuts": True if self.random_cuts.value == "True" else False,
        "cut_length":self.cut_length.value,
        "config":f"{self.pp.state.models_path}/{self.model_name.value[:-3]}.config.json",
        "is_xes": is_xes, 
        "sep": self.sep.value
    }  

    response = requests.post(
        f"http://{SERVER_NAME}:{SERVER_PORT}/generate_predictive_log", 
        json= params,
        timeout =TIMEOUT
    )
    if response.status_code == 200:
        #: container to indicate successful generation of predictive log 
        data = response.json()
        container =[  
            "predictive process model generated successfully", 
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.model_main_menu())), 
            "",
            ptg.Button(f"{self.pp.button_color}action menu", lambda *_:  self.return_to_menu())
        ]
    else:
        #: container to indicate that an error occurred from the request to the server 
        data = response.json()
        error = data["error"]
        container = [ 
            "training FAILED:",
            "",
            f"{error}", 
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.model_main_menu()))
        ]
    window = ptg.Window(*container, box="DOUBLE")
    window.center()
    return window
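
Analogously, a hedged sketch of the predictive-log request issued directly with `requests` (host, port and project paths are placeholders; the field names and default values mirror the params dictionary above):

import requests

params = {
    "path_to_model": "projects/my_project/models/f.pt",                       # placeholder paths
    "path_to_log": "projects/my_project/input_logs/Hospital_log.xes",
    "case_id": "case:concept:name",
    "activity_key": "concept:name",
    "timestamp_key": "time:timestamp",
    "new_log_path": "projects/my_project/predictive_logs/predicitive_log1.csv",
    "non_stop": True,          # "run until end event"
    "upper": "30",             # non-stop upper bound (sent as a string, as in the form)
    "random_cuts": True,
    "cut_length": "0",
    "config": "projects/my_project/models/f.config.json",                     # derived from the model name
    "is_xes": True,
    "sep": ",",
}
response = requests.post("http://localhost:8080/generate_predictive_log", json=params, timeout=60)
print(response.status_code, response.json())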

get_process_mining()

Sends a process mining request to the server with the previously confirmed parameters.

Side effects
  • The petri net that the mining algorithm computed is stored in the models directory of the current project, and the user can return to previous menus.
  • If unsuccessful, an error is indicated and the user can return to the model menu.
Source code in CLI/ProcessProphetModel.py, lines 205–259
def get_process_mining(self):
    """
    Sends a process mining request to the server with the previously confirmed parameters.

    Side effects:
        - The petri net that the mining algorithm computed is stored in the models directory of the current project,
        and the user can return to previous menus.
        - If unsuccessful, an error is indicated and the user can return to the model menu.
    """
    self.loading("preprocessing data...")
    params = {
        "path_to_log":f"{self.pp.state.predictive_logs_path}/{self.log_name.value}" ,
        "case_id":self.case_id_key.value,
        "activity_key":self.case_activity_key.value,
        "timestamp_key":self.case_timestamp_key.value,
        "config":f"{self.pp.state.models_path}/{self.model_name.value[:-3]}.config.json",
        "petri_net_path": f"{self.pp.state.petri_nets_path}/{self.petri_net_path.value}",
        "mining_algo_config":{
            "dependency_threshold":self.dependency_threshold.value,
            "and_threshold":self.and_threshold.value,
            "loop_two_threshold":self.loop_two_threshold.value,
            "noise_threshold":self.noise_threshold.value
        },
        "selected_model": self.mining_algorithm.value, 
        "sep": "," 
    }  

    response = requests.post(
        f"http://{SERVER_NAME}:{SERVER_PORT}/generate_predictive_process_model", 
        json= params,
        timeout =TIMEOUT
    )
    if response.status_code == 200: 
        data = response.json()
        #: container to indicate successful generation of predictive process model
        container =[  
            "predictive process model generated successfully", 
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.model_main_menu())), 
            "",
            ptg.Button(f"{self.pp.button_color}action menu", lambda *_:  self.return_to_menu())
        ]
    else: 
        data = response.json()
        error = data["error"]
        #: container to indicate that an error occurred from the request to the server
        container = [ 
            "training FAILED:",
            "",
            f"{error}", 
            "",
            ptg.Button(f"{self.pp.button_color}back", lambda *_: self.pp.switch_window(self.model_main_menu()))
        ]
    window = ptg.Window(*container, box="DOUBLE")
    window.center()
    return window
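
And the corresponding standalone mining request, again as a hedged sketch with placeholder host, port and project paths; the nested mining_algo_config carries the threshold fields of the form as strings, exactly as the code above does:

import requests

params = {
    "path_to_log": "projects/my_project/predictive_logs/predicitive_log1.csv",
    "case_id": "case:concept:name",
    "activity_key": "concept:name",
    "timestamp_key": "time:timestamp",
    "config": "projects/my_project/models/f.config.json",
    "petri_net_path": "projects/my_project/petri_nets/p_net1.pnml",
    "mining_algo_config": {
        "dependency_threshold": "0.5",
        "and_threshold": "0.65",
        "loop_two_threshold": "0.5",
        "noise_threshold": "0",
    },
    "selected_model": "heuristic_miner",
    "sep": ",",
}
response = requests.post("http://localhost:8080/generate_predictive_process_model", json=params, timeout=60)
print(response.status_code, response.json())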

loading(message='')

a loading screen

Source code in CLI/ProcessProphetModel.py, lines 39–49
def loading(self, message = ""): 
    """
    a loading screen 
    """
    container = ptg.Container(
        "Loading...", 
        message
    )
    window = ptg.Window(*container, box="DOUBLE")
    window.center()
    self.pp.switch_window(window)

model_main_menu()

menu to select one of the process mining, conformance checking, creation of a predictive log or go back to the previous menu

Source code in CLI/ProcessProphetModel.py, lines 51–71
def model_main_menu(self):
    """
    menu to select one of the process mining, conformance checking, creation of a predictive log
    or go back to the previous menu
    """
    #: container to indicate the different options of this part of the CLI
    container = ptg.Container(
        "Select one action", 
        "", 
        ptg.Button(f"{self.pp.button_color}Create a predictive event log", lambda *_: self.pp.switch_window(self.set_predictive_log())), 
        "", 
        ptg.Button(f"{self.pp.button_color}Run process mining", lambda *_: self.pp.switch_window(self.set_process_mining())), 
        "", 
        ptg.Button(f"{self.pp.button_color}Run conformance checking", lambda *_: self.pp.switch_window(self.set_conformance_checking())), 
        "",
        ptg.Button(f"{self.pp.button_color}Back", lambda *_: self.return_to_menu()), 
    )

    window = ptg.Window(container, box="DOUBLE")
    window.center()
    return window 

return_to_menu()

returns to the ProcessProphet start menu

Source code in CLI/ProcessProphetModel.py, lines 33–37
def return_to_menu(self):
    """
    returns to p.p. start
    """
    pp_start = ProcessProphetStart.ProcessProphetStart(self.pp, False)

set_conformance_checking()

User can either start the conformance checking with the displayed default parameters or alternatively adapt the parameters to their own preference (e.g. select different conformance checking algorithm).

Modes are not differentiated for this option.

Side effects
  • The modified parameters are stored in a container and then the conformance checking function is called.
  • Parameters are displayed in the window.
Source code in CLI/ProcessProphetModel.py, lines 359–393
def set_conformance_checking(self):
    """
    User can either start the conformance checking with the displayed default parameters or alternatively adapt the parameters to their
    own preference (e.g. select different conformance checking algorithm).

    Modes are not differentiated for this option.

    Side effects:
        - The modified parameters are stored in a container and then the conformance checking function is called.
        - Parameters are displayed in the window.
    """
    self.case_id_key=  ptg.InputField("case:concept:name", prompt="case id key: ")
    self.case_activity_key=  ptg.InputField("concept:name", prompt="activity key: ")
    self.case_timestamp_key=  ptg.InputField("time:timestamp", prompt="timestamp key: ")

    self.log_name=  ptg.InputField("Hospital_log.xes", prompt="log name: ")
    self.petri_net_path= ptg.InputField("p_net1.pnml", prompt= "petri net path: ")

    self.conformance_technique= ptg.InputField("token",prompt= "conformance technique: ")
    #: container to store and indicate all the needed parameters
    container = [
        "Enter the following params:",
        self.log_name,
        self.case_id_key,
        self.case_activity_key,
        self.case_timestamp_key,
        self.petri_net_path, 
        self.conformance_technique,
        ptg.Button(f"{self.pp.button_color}continue",lambda *_: self.pp.switch_window(self.get_conformance_checking()) ),
            "",
        ptg.Button(f"{self.pp.button_color}back",lambda *_: self.pp.switch_window(self.model_main_menu()) )
    ]
    window = ptg.Window(*container, width = self.pp.window_width)
    window.center()
    return window

set_predictive_log()

User can either start generating a predictive log with the displayed default parameters or alternatively adapt the parameters to their own preference.

Side effects
  • The modified parameters are stored in a container and then the function for creating a predictive log is called.
  • Parameters are displayed in the window.
Source code in CLI/ProcessProphetModel.py, lines 133–202
def set_predictive_log(self):
    """
    User can either start generating a predictive log with the displayed default parameters or alternatively adapt the parameters to their
    own preference.

    Side effects:
        - The modified parameters are stored in a container and then the function for creating a predictive log is called.
        - Parameters are displayed in the window.
    """
    if self.pp.mode== ProcessProphetMode.advanced:
        self.model_name=  ptg.InputField("f.pt", prompt="model name: ")
        self.log_name=  ptg.InputField("Hospital_log.xes", prompt="log name: ")
        self.case_id_key=  ptg.InputField("case:concept:name", prompt="case id key: ")
        self.case_activity_key=  ptg.InputField("concept:name", prompt="activity key: ")
        self.case_timestamp_key=  ptg.InputField("time:timestamp", prompt="timestamp key: ")
        self.predictive_event_log_name  = ptg.InputField("predicitive_log1.csv", prompt= "predictive log name: ")
        self.non_stop = ptg.InputField("True", prompt="run until end event: ")
        self.upper = ptg.InputField("30", prompt="non stop upper bound: ")
        self.random_cuts = ptg.InputField("True", prompt="use random cuts: ")
        self.cut_length = ptg.InputField("0", prompt="cut length: ")
        self.sep  = ptg.InputField(",", prompt= "csv separator: ")
        container = [
            "Enter the following params:",
            self.model_name,
            self.log_name,
            self.case_id_key,
            self.case_activity_key,
            self.case_timestamp_key,
            self.predictive_event_log_name,
            self.non_stop,
            self.upper,
            self.random_cuts,
            self.cut_length,
            self.sep, 
            ptg.Button(f"{self.pp.button_color}continue",lambda *_: self.pp.switch_window(self.get_predictive_log()) ),
            "",
            ptg.Button(f"{self.pp.button_color}back",lambda *_: self.pp.switch_window(self.model_main_menu()) )
        ]
        window = ptg.Window(*container, width = self.pp.window_width)
        window.center()
    elif self.pp.mode == ProcessProphetMode.quick:
        #: in this mode the random cuts option where 
        # cuts are done until the end is used by default. 
        self.model_name=  ptg.InputField("f.pt", prompt="model name: ")
        self.log_name=  ptg.InputField("Hospital_log.xes", prompt="log name: ")
        self.case_id_key=  ptg.InputField("case:concept:name", prompt="case id key: ")
        self.case_activity_key=  ptg.InputField("concept:name", prompt="activity key: ")
        self.case_timestamp_key=  ptg.InputField("time:timestamp", prompt="timestamp key: ")
        self.predictive_event_log_name  = ptg.InputField("predicitive_log4.csv", prompt= "predictive log name: ")
        self.non_stop = ptg.InputField("True", prompt="run until end event: ")
        self.upper = ptg.InputField("100", prompt="non stop upper bound: ")
        self.random_cuts = ptg.InputField("True", prompt="use random cuts: ")
        self.cut_length = ptg.InputField("0", prompt="cut length: ")
        self.sep  = ptg.InputField(",", prompt= "csv separator: ")
        container = [
            "Enter the following params:",
            self.model_name,
            self.log_name,
            self.case_id_key,
            self.case_activity_key,
            self.case_timestamp_key,
            self.predictive_event_log_name,
            self.sep, 
            ptg.Button(f"{self.pp.button_color}continue",lambda *_: self.pp.switch_window(self.get_predictive_log()) ),
            "",
            ptg.Button(f"{self.pp.button_color}back",lambda *_: self.pp.switch_window(self.model_main_menu()) )
        ]
        window = ptg.Window(*container, width = self.pp.window_width)
        window.center()
    return window
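
Note that in quick mode the non-stop, upper-bound, random-cuts and cut-length fields are still created, only not added to the displayed container, so get_predictive_log reads their defaults. A sketch of what the hidden fields contribute to the request (the remark on cut_length is an assumption):

quick_mode_hidden_defaults = {
    "non_stop": True,      # "run until end event" stays at its default "True"
    "upper": "100",        # non-stop upper bound
    "random_cuts": True,   # random cuts are enabled by default in this mode
    "cut_length": "0",     # presumably ignored while random cuts are enabled (assumption)
}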

set_process_mining()

User can either start the mining with the displayed default parameters or alternatively adapt the parameters to their own preference (e.g. select different mining algorithm).

Modes are not differentiated under this option.

Side effects
  • The modified parameters are stored in a container and then the mining function is called.
  • Parameters are displayed in the window.
Source code in CLI/ProcessProphetModel.py, lines 262–305
def set_process_mining(self):
    """
    User can either start the mining with the displayed default parameters or alternatively adapt the parameters to their
    own preference (e.g. select different mining algorithm).

    Modes are not differentiated under this option.

    Side effects:
        - The modified parameters are stored in a container and then the mining function is called.
        - Parameters are displayed in the window.
    """
    self.model_name=  ptg.InputField("f.pt", prompt="model config: ")
    self.log_name=  ptg.InputField("predicitive_log1.csv", prompt="log name: ")
    self.case_id_key=  ptg.InputField("case:concept:name", prompt="case id key: ")
    self.case_activity_key=  ptg.InputField("concept:name", prompt="activity key: ")
    self.case_timestamp_key=  ptg.InputField("time:timestamp", prompt="timestamp key: ")

    self.petri_net_path= ptg.InputField("p_net1.pnml", prompt= "petri net path: ")
    self.mining_algorithm= ptg.InputField("heuristic_miner", prompt= "process discovery algorithm: ")
    self.dependency_threshold= ptg.InputField("0.5",prompt= "dependency threshold: ")
    self.and_threshold= ptg.InputField("0.65", prompt= "and threshold: ")
    self.loop_two_threshold= ptg.InputField("0.5", prompt= "loop two threshold: ")
    self.noise_threshold= ptg.InputField("0", prompt= "noise threshold: ")
    #: container to store and indicate all the needed parameters
    container = [
        "Enter the following params:",
        self.model_name,
        self.log_name,
        self.case_id_key,
        self.case_activity_key,
        self.case_timestamp_key,
        self.petri_net_path, 
        self.mining_algorithm, 
        self.dependency_threshold, 
        self.and_threshold, 
        self.loop_two_threshold, 
        self.noise_threshold, 
        ptg.Button(f"{self.pp.button_color}continue",lambda *_: self.pp.switch_window(self.get_process_mining()) ),
        "",
        ptg.Button(f"{self.pp.button_color}back",lambda *_: self.pp.switch_window(self.model_main_menu()) )
    ]
    window = ptg.Window(*container, width = self.pp.window_width)
    window.center()
    return window