ShadowProgrammer committed on
Commit
ec1506e
·
1 Parent(s): f13b70a

Delete testing.py

Browse files
Files changed (1) hide show
  1. testing.py +0 -69
testing.py DELETED
@@ -1,69 +0,0 @@
1
- import time
2
-
3
- print("Loading libraries...")
4
- start_time = time.time()
5
-
6
- import sklearn
7
- from sklearn.model_selection import train_test_split
8
- from sklearn.metrics import accuracy_score, confusion_matrix, classification_report, mean_squared_error
9
- from sklearn.neural_network import MLPRegressor
10
- from sklearn.feature_extraction.text import CountVectorizer
11
- import matplotlib.pyplot as plt
12
- import numpy as np
13
- import datasets
14
- import pickle
15
-
16
- print(f"Libraries loaded in {round((time.time() - start_time) * 1000, 3)} ms.")
17
-
18
- print("Setting configuration...")
19
- start_time = time.time()
20
-
21
- # Set configuration
22
- sklearn.set_config(working_memory=4096)
23
- data_size = 100000
24
-
25
-
26
- print(f"Configuration set in {round((time.time() - start_time) * 1000, 3)} ms.")
27
-
28
- print("Loading model and vectorizer...")
29
- start_time = time.time()
30
-
31
- with open('model.pkl', 'rb') as model_file:
32
- mlp = pickle.load(model_file)
33
-
34
- with open('vectorizer.pkl', 'rb') as vectorizer_file:
35
- count_vect = pickle.load(vectorizer_file)
36
-
37
- print(f"Model and vectorizer loaded in {round((time.time() - start_time) * 1000, 3)} ms.")
38
-
39
- print("Loading data...")
40
- start_time = time.time()
41
-
42
- # Load data
43
- dataset = datasets.load_dataset('ucberkeley-dlab/measuring-hate-speech', 'binary')
44
- df = dataset['train'].to_pandas()
45
-
46
- print(f"Data loaded in {round((time.time() - start_time) * 1000, 3)} ms.")
47
- print(df.head())
48
-
49
- print("Fitting vectorizer...")
50
- start_time = time.time()
51
-
52
- # Extract text and labels
53
- X_text = df['text'][:data_size] # Assuming 'text' is the column containing the text data
54
-
55
- # Convert text to vectors
56
- X = count_vect.fit(X_text)
57
-
58
- print(f"Vectorizer fit in {round((time.time() - start_time) * 1000, 3)} ms.")
59
-
60
- print("Predicting...")
61
- start_time = time.time()
62
-
63
- sentences = ["It is about time for all illegals to go back to their country of origin and keep our freeway open and prevent heavy traffic.", "OMG! The EGO\'s of these young, young, inexperienced women display are remarkable! So self absorbed - so, so proud of the mistakes their constituents all made in giving them a bit of power. They\'ll be quite surprised to find that the vast majority of Americans think that these women represent the very WORST of \"modern\" America. Egos out of control !!", "Instagram refugees lmao, let's build a wall instead"]
64
- vectorized_sentences = count_vect.transform(sentences)
65
-
66
- predictions = mlp.predict(vectorized_sentences)
67
- # write dict of sentences and predictions
68
- values = {sentences[i]: predictions[i] for i in range(len(sentences))}
69
- print(values)