Spaces:
Sleeping
Sleeping
Add Hindi abusive-language detection via Hugging Face Inference API
Browse files
__pycache__/clean.cpython-38.pyc
ADDED
|
Binary file (1.15 kB). View file
|
|
|
__pycache__/language_detection.cpython-38.pyc
ADDED
|
Binary file (3.53 kB). View file
|
|
|
__pycache__/language_detection.cpython-39.pyc
CHANGED
|
Binary files a/__pycache__/language_detection.cpython-39.pyc and b/__pycache__/language_detection.cpython-39.pyc differ
|
|
|
app.py
CHANGED
|
@@ -6,6 +6,8 @@ import nltk
|
|
| 6 |
nltk.download('wordnet')
|
| 7 |
import numpy as np
|
| 8 |
import language_detection
|
|
|
|
|
|
|
| 9 |
print("all imports worked")
|
| 10 |
# Load pre-trained model
|
| 11 |
model = joblib.load('model_joblib.pkl')
|
|
@@ -13,6 +15,12 @@ print("model load ")
|
|
| 13 |
tf = joblib.load('tf_joblib.pkl')
|
| 14 |
print("tfidf load ")
|
| 15 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
# Define function to predict whether sentence is abusive or not
|
| 17 |
def predict_abusive_lang(text):
|
| 18 |
print("original text ", text)
|
|
@@ -34,8 +42,22 @@ def predict_abusive_lang(text):
|
|
| 34 |
else :
|
| 35 |
return ["Please write something in the comment box..","No cleaned text"]
|
| 36 |
elif lang=='hi':
|
|
|
|
| 37 |
print("using hugging face api")
|
| 38 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
else :
|
| 40 |
return ["UN","No cleaned text"]
|
| 41 |
|
|
|
|
| 6 |
nltk.download('wordnet')
|
| 7 |
import numpy as np
|
| 8 |
import language_detection
|
| 9 |
+
import requests
|
| 10 |
+
|
| 11 |
print("all imports worked")
|
| 12 |
# Load pre-trained model
|
| 13 |
model = joblib.load('model_joblib.pkl')
|
|
|
|
| 15 |
tf = joblib.load('tf_joblib.pkl')
|
| 16 |
print("tfidf load ")
|
| 17 |
|
| 18 |
+
def query(payload):
    """POST *payload* to the Hugging Face Inference API for the
    Hate-speech-CNERG/hindi-abusive-MuRIL model and return the parsed
    JSON response.

    Parameters
    ----------
    payload : dict
        JSON-serializable request body, e.g. ``{"inputs": "<hindi text>"}``.

    Returns
    -------
    list | dict
        The decoded JSON body returned by the inference endpoint
        (for this model: a list of label/score dicts).

    Raises
    ------
    requests.exceptions.RequestException
        On network failure, timeout, or a non-2xx HTTP status.
    """
    import os

    API_URL = (
        "https://api-inference.huggingface.co/models/"
        "Hate-speech-CNERG/hindi-abusive-MuRIL"
    )
    # SECURITY: the token used to be hard-coded here, which leaked a live
    # credential into version control. Read it from the environment instead;
    # the previously committed key must be revoked on huggingface.co.
    token = os.environ.get("HF_API_TOKEN", "")
    headers = {"Authorization": f"Bearer {token}"}
    # timeout= prevents the request from blocking the app forever if the
    # inference endpoint stalls (e.g. cold model load).
    response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
    # Fail loudly on HTTP errors instead of handing callers an error JSON
    # they would mis-index as model output.
    response.raise_for_status()
    return response.json()
|
| 23 |
+
|
| 24 |
# Define function to predict whether sentence is abusive or not
|
| 25 |
def predict_abusive_lang(text):
|
| 26 |
print("original text ", text)
|
|
|
|
| 42 |
else :
|
| 43 |
return ["Please write something in the comment box..","No cleaned text"]
|
| 44 |
elif lang=='hi':
|
| 45 |
+
|
| 46 |
print("using hugging face api")
|
| 47 |
+
output = query({
|
| 48 |
+
"inputs": text#"खान चाचा को मेरा सला"
|
| 49 |
+
})
|
| 50 |
+
print(output, len(output))
|
| 51 |
+
# if(len(output))
|
| 52 |
+
l_0 = float(output[0][0]['score'])
|
| 53 |
+
l_1 = float(output[0][1]['score'])
|
| 54 |
+
if output[0][0]['label']=='LABEL_1' :
|
| 55 |
+
if l_0>l_1:
|
| 56 |
+
return ["AB",text]
|
| 57 |
+
|
| 58 |
+
else :
|
| 59 |
+
return ["NA",text]
|
| 60 |
+
|
| 61 |
else :
|
| 62 |
return ["UN","No cleaned text"]
|
| 63 |
|