
Car-ing is sharing, an auto dealership company for car sales and rentals, is taking its services to the next level thanks to Large Language Models (LLMs).

As its newly recruited AI and NLP developer, you've been asked to prototype a chatbot app with multiple functionalities that not only assist customers but also support the company's human agents.

The solution should receive textual prompts and use a variety of pre-trained Hugging Face LLMs to handle a series of tasks: classifying the sentiment of a car review, answering a customer question, and summarizing or translating text.
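
Every task below follows the same Hugging Face pattern: build a pipeline for a given task and model checkpoint, then call it on the input text. As a minimal sketch of that pattern, reusing the sentiment checkpoint chosen later in the code (the example sentence and the printed score are only illustrative):

from transformers import pipeline

# Generic pattern: task name + model checkpoint -> callable pipeline object
sentiment = pipeline(task='text-classification',
                     model='distilbert-base-uncased-finetuned-sst-2-english')
print(sentiment('The ride quality of this car is fantastic.'))
# e.g. [{'label': 'POSITIVE', 'score': 0.99}]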

# Import necessary packages
import pandas as pd
import torch
import evaluate
from transformers import pipeline, logging

# Show only warnings and errors from transformers
logging.set_verbosity(logging.WARNING)

# Load the car reviews dataset and extract the review texts and ground-truth labels
df_reviews = pd.read_csv('data/car_reviews.csv', sep=';')
print(df_reviews)

reviews = df_reviews['Review'].tolist()
labels = df_reviews['Class'].tolist()
labels_binary = [1 if label == 'POSITIVE' else 0 for label in labels]

# Classify the sentiment of each review with a pre-trained sentiment classifier
classifier = pipeline(task='text-classification', model='distilbert-base-uncased-finetuned-sst-2-english')
predicted_labels = classifier(reviews)
print(predicted_labels)

# Map the predicted 'POSITIVE'/'NEGATIVE' labels to binary values
predictions = [1 if pred['label'] == 'POSITIVE' else 0 for pred in predicted_labels]
print(predictions)
# Evaluate the sentiment predictions against the ground-truth labels
accuracy_metric = evaluate.load('accuracy')
f1_metric = evaluate.load('f1')
accuracy_result = accuracy_metric.compute(predictions=predictions, references=labels_binary)['accuracy']
f1_result = f1_metric.compute(predictions=predictions, references=labels_binary)['f1']
print(f"Accuracy: {accuracy_result}")
print(f"F1: {f1_result}")
print(f1_metric.description)
# Keep only the first two sentences of the first review for translation
first_two_sentences = reviews[0].split('.')[:2]
review_translate = '.'.join(first_two_sentences) + '.'
print(reviews[0])
print(review_translate)

# Translate the extract from English to Spanish
translator = pipeline(task='translation_en_to_es', model='Helsinki-NLP/opus-mt-en-es')
translated_review = translator(review_translate)[0]['translation_text']
print(translated_review)

# Load the reference translations (one per line) to evaluate the translation
with open('data/reference_translations.txt', 'r') as file:
    references_translation = [line.strip() for line in file if line.strip()]
print(references_translation)

# Compute BLEU between the model translation and the reference translations
bleu_metric = evaluate.load('bleu')
bleu_score = bleu_metric.compute(predictions=[translated_review], references=[references_translation])
print(bleu_score)
# Use the second review as context for extractive question answering
review2 = reviews[1]
print(review2)

extractive_qa = pipeline(task='question-answering', model='deepset/minilm-uncased-squad2')
question = 'What did he like about the brand?'
result_qa = extractive_qa(question=question, context=review2)
print(result_qa)

answer = result_qa['answer']
print(answer)
# Summarize the last review into a short snippet (roughly 50-55 tokens)
last_review = reviews[-1]
print(last_review)

summarizer = pipeline(task='summarization', model='facebook/bart-large-cnn')
summarized_text = summarizer(last_review, min_length=50, max_length=55,
                             clean_up_tokenization_spaces=True)[0]['summary_text']
print(summarized_text)