Question answering model of hugging face transformers shows errors in live server

66 views Asked by At

I am using deepset/roberta-base-squad2 model of hugging face transformer for question answering in my chatbot in python, django framework, this model is working fine in local site but not working in live server

Below is my code

def Chatbot(request):
    if request.method == 'POST':
        question = request.POST['message']
        print(question)
        ans = ''
        data1 = Chat.objects.filter(message__icontains=question)
        for i in data1:
            print(i.response)
            ans += i.response
        print(ans)
        if ans:
            return JsonResponse({'response':ans})
        else:
            starting_time = time.time()
            print(starting_time)
            data = Chat.objects.get(id=1)
            passage = data.response 
            answerer = Extract_Answer()
            answer = answerer.Answer(question, passage)
            
            total_time = time.time() - starting_time
            print(total_time)
            if answer:
                print("Answer:", answer)
                return JsonResponse({'response':answer})
            else:
                return JsonResponse({'response':'Answer Not Found'})
    return render(request, 'chatbot.html')

class Extract_Answer():
    def __init__(self):
        self.model_name = "deepset/roberta-base-squad2"
        self.qa_pipeline = pipeline("question-answering", model=self.model_name, tokenizer=self.model_name)
    
    def Answer(self, question, passage):
    
        QA_input = {
            'question':question,
            'context':passage
        }
        res = self.qa_pipeline(QA_input)
        print(res)
        print(res['answer'])
        return res['answer'] 

Then this error I'm getting in live server

  Truncated or oversized response headers received from daemon process 'djangoproject': /var/www/html/humari/djangoproject/wsgi.py, referer: https://chat.humaricoding.com/
0

There are 0 answers