jonathanjordan21 committed
Commit 3dc443c
Parent: 7e435ad

Update README.md

Files changed (1): README.md (+49 -8)
README.md CHANGED
---
---

# Initialize Model and Tokenizer

```python
import contextlib
import os
from matplotlib import pyplot as plt
# ...

tokenizer = AutoTokenizer.from_pretrained("janpase97/codeformer-pretrained")

model = AutoModelForSeq2SeqLM.from_pretrained("janpase97/codeformer-pretrained")
```
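The checkpoint loaded above is a seq2seq model, so it can be smoke-tested with a standard `generate()` call. The sketch below is illustrative only and not part of the committed script; the prompt string is made up.

```python
# Illustrative smoke test for the loaded checkpoint (not in the original README).
prompt = "def add(a, b):"  # made-up prompt
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```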
 
# Check for the graphics API

```python
def check_graphics_api(target_app_name):
    graphics_api = None
    # ...
    elif "vulkan" in output:
        graphics_api = "VULKAN"
    return graphics_api
```
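For reference, a call site might look like the sketch below; the process name `GTA5.exe` is an assumption and is not visible in this diff.

```python
# Illustrative call; "GTA5.exe" is an assumed process name.
graphics_api = check_graphics_api("GTA5.exe")
print(f"Detected graphics API: {graphics_api}")
```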
 
# Get the target application's process object

```python
def get_target_app_process(target_app_name):
    return next(
        (
            # ...
        ),
        None,
    )
```
 
# Attach the AI to the application's process by PID

```python
def attach_ai_to_app_pid(target_app_process):
    if target_app_process is not None:
        print(f"AI is attached to the application's process with PID: {target_app_process.pid}")
        # ...
    else:
        print("Could not find the target application's process to attach the AI.")
        return False
```
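The two helpers above are intended to be used together: look up the process, then attach to it. A minimal sketch, again assuming `GTA5.exe` as the target process name:

```python
# Illustrative wiring of get_target_app_process and attach_ai_to_app_pid.
target_app_name = "GTA5.exe"  # assumed; the actual value is not shown in this diff
target_app_process = get_target_app_process(target_app_name)
attach_ai_to_app_pid(target_app_process)
```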
 
# Check if the targeted application is running

```python
def is_target_app_running(target_app_name):
    return any(
        process.info['name'] == target_app_name
        for process in psutil.process_iter(['name'])
    )
```
 
# Create the directory if it doesn't exist

```python
directory = r"G:\Epic Games\GTAV\GTA5_AI\trained_models"
if not os.path.exists(directory):
    os.makedirs(directory)
```
 
# Define the neural network model

```python
class NanoCircuit(nn.Module):
    def __init__(self):
        super(NanoCircuit, self).__init__()
        # ...
        x = torch.relu(self.fc1(x))
        x = self.fc2(x)
        return x
```
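The diff hides the layer definitions and the `forward` signature of `NanoCircuit`; only the `fc1`/`fc2` calls survive. A plausible reconstruction consistent with the 28×28 MNIST input used below is sketched here; the 784→128→10 sizes are an assumption, not taken from the README.

```python
# Hedged reconstruction of the elided parts; layer sizes are assumed.
class NanoCircuit(nn.Module):
    def __init__(self):
        super(NanoCircuit, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 128)  # flattened 28x28 MNIST image -> hidden
        self.fc2 = nn.Linear(128, 10)       # hidden -> 10 digit classes

    def forward(self, x):
        x = x.view(x.size(0), -1)           # flatten to (batch, 784)
        x = torch.relu(self.fc1(x))
        x = self.fc2(x)
        return x
```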
 
# Set the device to GPU if available

```python
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
```
 
# Load the MNIST dataset

```python
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True)
```
 
# Initialize the model and move it to the GPU

```python
model = NanoCircuit().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
```
 
# Train the model on the GPU with a data cap

```python
def train_with_data_cap(model, data_loader, criterion, optimizer, device, data_cap_gb):
    data_processed = 0
    data_cap_bytes = data_cap_gb * (1024 ** 3)
    # ...
    print(f"Data processed: {data_processed / (1024 ** 3):.2f} GB")

    return model
```
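Most of `train_with_data_cap` is elided by the diff; only the counter set-up and the final report remain. One way the cap could plausibly be enforced is sketched below, counting the bytes of each batch against `data_cap_bytes`; this is an assumption about the elided loop, not the committed code.

```python
# Hedged sketch of the elided training loop; not the exact code from the README.
def train_with_data_cap_sketch(model, data_loader, criterion, optimizer, device, data_cap_gb):
    data_processed = 0
    data_cap_bytes = data_cap_gb * (1024 ** 3)
    model.train()
    while data_processed < data_cap_bytes:
        for images, labels in data_loader:
            if data_processed >= data_cap_bytes:
                break
            images, labels = images.to(device), labels.to(device)
            data_processed += images.element_size() * images.nelement()

            optimizer.zero_grad()
            loss = criterion(model(images), labels)
            loss.backward()
            optimizer.step()
    print(f"Data processed: {data_processed / (1024 ** 3):.2f} GB")
    return model
```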
 
# Save the updated model as a .onnx file

```python
def save_model(model, filepath):
    dummy_input = torch.randn(1, 1, 28, 28).to(device)
    torch.onnx.export(model, dummy_input, filepath, input_names=['input'], output_names=['output'], opset_version=11)
```
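Once exported, the `.onnx` file can be sanity-checked outside PyTorch. A minimal sketch using `onnxruntime` (an assumed extra dependency; the README does not mention it):

```python
# Hedged verification sketch; onnxruntime is not listed by the README.
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession(os.path.join(directory, 'GTA5_TRAINED.onnx'))
dummy = np.random.randn(1, 1, 28, 28).astype(np.float32)
logits = session.run(['output'], {'input': dummy})[0]
print("ONNX output shape:", logits.shape)
```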
 
# Train the model with a 50 GB data cap

```python
trained_model = train_with_data_cap(model, train_loader, criterion, optimizer, device, data_cap_gb=50)
save_model(trained_model, os.path.join(directory, 'GTA5_TRAINED.onnx'))

    # ...
    print(f"Data processed: {data_processed / (1024 ** 3):.2f} GB")

    return model
```
 
# Train the model with a 10 GB data cap

```python
trained_model = train_with_data_cap(model, train_loader, criterion, optimizer, device, data_cap_gb=10)
save_model(trained_model, os.path.join(directory, 'GTA5_TRAINED.onnx'))

    # ...
    if not is_target_app_running(target_app_name):
        print("Target application not detected in 5 seconds. Shutting down the AI.")
        break
```
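The loop around the exit check above is elided; the `break` implies an enclosing loop. A sketch consistent with the visible condition, with the 5-second interval taken from the printed message, is shown below; the body of the committed loop is not visible here.

```python
# Hedged sketch of the elided watchdog loop.
import time

while True:
    time.sleep(5)  # interval implied by the "not detected in 5 seconds" message
    if not is_target_app_running(target_app_name):
        print("Target application not detected in 5 seconds. Shutting down the AI.")
        break
```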
 
# Generate some random data for the boxplots

```python
np.random.seed(0)
original_data = np.random.normal(0, 1, 100)
trained_data = np.random.normal(0.5, 1, 100)

    # ...
    if not is_target_app_running(target_app_name):
        print("Target application not detected in 5 seconds. Shutting down the AI.")
        break
```
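The plotting code that consumes `original_data` and `trained_data` is not visible in this diff. Below is a minimal matplotlib sketch of the kind of boxplot comparison the heading describes; labels and title are assumptions.

```python
# Hedged sketch; labels and title are assumptions.
fig, ax = plt.subplots()
ax.boxplot([original_data, trained_data], labels=["Original", "Trained"])
ax.set_title("Boxplot comparison of the two samples")
plt.show()
```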