harshvardhan96 committed
Commit 6a383e5 · 1 Parent(s): 8ccacc4

updated app.py file

Files changed (1)
  1. app.py +76 -66
app.py CHANGED
@@ -1,76 +1,86 @@
  import pickle
+ import gradio as gr
  import tensorflow.compat.v1 as tf
  tf.disable_v2_behavior()
- #loading the saved parameters
- def load_params():
-     with open('./params.p', mode='rb') as in_file:
-         return pickle.load(in_file)
-
- def load_preprocess():
-     with open('./preprocess.p', mode='rb') as in_file:
-         return pickle.load(in_file)
-
- #getting the source and target vocabularies
- _, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = load_preprocess()
-
- load_path = load_params()
- print("Loaded path:", load_path)
- print(type(load_path))
-
- batch_size = 30
-
- #converting the words to vectors of integers
- def word_to_seq(word, vocab_to_int):
-     results = []
-     for word in list(word):
-         if word in vocab_to_int:
-             results.append(vocab_to_int[word])
-         else:
-             results.append(vocab_to_int['<UNK>'])
-
-     return results
-
- #taking user input for prediction
- print("\n Enter word to be transliterated:")
- transliterate_word = input().lower()
-
- transliterate_word = word_to_seq(transliterate_word, source_vocab_to_int)
-
- #initialising the graph
- loaded_graph = tf.Graph()
-
- #initialising the session
- tf.compat.v1.Session()
- # with tf.Session(graph=loaded_graph) as sess:
- with tf.compat.v1.Session(graph=loaded_graph) as sess:
-
-     # Load saved model
-     loader = tf.train.import_meta_graph("./dev.meta")
-
-     # tf.train.Saver.restore(sess,load_path)
-     loader.restore(sess, "./dev")
-
-     #providing placeholder names from the loaded graph
-     input_data = loaded_graph.get_tensor_by_name('input:0')
-     logits = loaded_graph.get_tensor_by_name('predictions:0')
-     target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
-     keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
-
-     #transliterating the given word
-     transliterate_logits = sess.run(logits, {input_data: [transliterate_word]*batch_size,
-                                              target_sequence_length: [len(transliterate_word)]*batch_size,
-                                              keep_prob: 1.0})[0]
-
-     print('Input')
-     print(' Word Ids: {}'.format([i for i in transliterate_word]))
-     print(' English Word: {}'.format([source_int_to_vocab[i] for i in transliterate_word]))
-
-     print('\nPrediction')
-     print(' Word Id: {}'.format([i for i in transliterate_logits]))
-
-     #showing the output
-     output = ""
-     for i in transliterate_logits:
-         if target_int_to_vocab[i] != '<EOS>':
-             output = output + target_int_to_vocab[i]
-     print(' Hindi Word: {}'.format(output))
+
+ def transliterate_eng_hin(input_word):
+     #loading the saved parameters
+     def load_params():
+         with open('./params.p', mode='rb') as in_file:
+             return pickle.load(in_file)
+
+     def load_preprocess():
+         with open('./preprocess.p', mode='rb') as in_file:
+             return pickle.load(in_file)
+
+     #getting the source and target vocabularies
+     _, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = load_preprocess()
+
+     load_path = load_params()
+     print("Loaded path:", load_path)
+     print(type(load_path))
+
+     batch_size = 30
+
+     #converting the words to vectors of integers
+     def word_to_seq(word, vocab_to_int):
+         results = []
+         for word in list(word):
+             if word in vocab_to_int:
+                 results.append(vocab_to_int[word])
+             else:
+                 results.append(vocab_to_int['<UNK>'])
+
+         return results
+
+     #taking user input for prediction
+     # print("\n Enter word to be transliterated:")
+     transliterate_word = input_word.lower()
+
+     transliterate_word = word_to_seq(transliterate_word, source_vocab_to_int)
+
+     #initialising the graph
+     loaded_graph = tf.Graph()
+
+     #initialising the session
+     tf.compat.v1.Session()
+     # with tf.Session(graph=loaded_graph) as sess:
+     with tf.compat.v1.Session(graph=loaded_graph) as sess:
+
+         # Load saved model
+         loader = tf.train.import_meta_graph("./dev.meta")
+
+         # tf.train.Saver.restore(sess,load_path)
+         loader.restore(sess, "./dev")
+
+         #providing placeholder names from the loaded graph
+         input_data = loaded_graph.get_tensor_by_name('input:0')
+         logits = loaded_graph.get_tensor_by_name('predictions:0')
+         target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
+         keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
+
+         #transliterating the given word
+         transliterate_logits = sess.run(logits, {input_data: [transliterate_word]*batch_size,
+                                                  target_sequence_length: [len(transliterate_word)]*batch_size,
+                                                  keep_prob: 1.0})[0]
+
+         print('Input')
+         print(' Word Ids: {}'.format([i for i in transliterate_word]))
+         print(' English Word: {}'.format([source_int_to_vocab[i] for i in transliterate_word]))
+
+         print('\nPrediction')
+         print(' Word Id: {}'.format([i for i in transliterate_logits]))
+
+         #showing the output
+         output = ""
+         for i in transliterate_logits:
+             if target_int_to_vocab[i] != '<EOS>':
+                 output = output + target_int_to_vocab[i]
+         print(' Hindi Word: {}'.format(output))
+
+     return output
+
+ demo = gr.Interface(fn=transliterate_eng_hin, inputs="text", outputs="text")
+
+ if __name__ == "__main__":
+     demo.launch()
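
A quick sanity check of the character-to-id mapping done by word_to_seq above (not part of the commit; the toy vocabulary below is made up, the real one is loaded from preprocess.p):

toy_vocab = {'<UNK>': 0, 'r': 1, 'a': 2, 'm': 3}

def word_to_seq(word, vocab_to_int):
    # Each character becomes its integer id; unseen characters fall back to <UNK>.
    return [vocab_to_int.get(ch, vocab_to_int['<UNK>']) for ch in word]

print(word_to_seq("ram", toy_vocab))  # [1, 2, 3]
print(word_to_seq("rax", toy_vocab))  # [1, 2, 0] -> 'x' is unknown, mapped to <UNK>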
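
A possible follow-up, sketched here only (not part of this commit): transliterate_eng_hin re-reads the pickles and re-imports the checkpoint on every Gradio request. Assuming the same preprocess.p structure and tensor names as app.py, the model could be loaded once at startup and only the sess.run call kept inside the callback:

import pickle
import gradio as gr
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Load the vocabularies once (same unpacking as in app.py).
with open('./preprocess.p', mode='rb') as in_file:
    _, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = pickle.load(in_file)

batch_size = 30

# Import the checkpoint once and keep the session open for all requests.
loaded_graph = tf.Graph()
sess = tf.compat.v1.Session(graph=loaded_graph)
with loaded_graph.as_default():
    loader = tf.train.import_meta_graph("./dev.meta")
    loader.restore(sess, "./dev")

input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('predictions:0')
target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')

def transliterate_eng_hin(input_word):
    # Characters to ids, with <UNK> as the fallback for unseen characters.
    seq = [source_vocab_to_int.get(ch, source_vocab_to_int['<UNK>'])
           for ch in input_word.lower()]
    preds = sess.run(logits, {input_data: [seq] * batch_size,
                              target_sequence_length: [len(seq)] * batch_size,
                              keep_prob: 1.0})[0]
    # Join the predicted characters, dropping the <EOS> marker.
    return "".join(target_int_to_vocab[i] for i in preds
                   if target_int_to_vocab[i] != '<EOS>')

demo = gr.Interface(fn=transliterate_eng_hin, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()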