Skip to content

Car-ing is sharing, an auto dealership company for car sales and rental, is taking its services to the next level thanks to Large Language Models (LLMs).

As their newly recruited AI and NLP developer, you've been asked to prototype a chatbot app with multiple functionalities that not only assist customers but also provide support to human agents in the company.

The solution should receive textual prompts and use a variety of pre-trained Hugging Face LLMs to respond to a series of tasks, e.g. classifying the sentiment in a car’s text review, answering a customer question, summarizing or translating text, etc.

# Import necessary packages
import pandas as pd
import numpy as np
import torch
import evaluate
import re

from transformers import logging, pipeline, AutoTokenizer, AutoModelForSequenceClassification, AutoModelForSeq2SeqLM
logging.set_verbosity(logging.WARNING)
# Start your code here!
# READ IN THE CAR REVIEW CSV FILE (semicolon-delimited: review text; sentiment label)
car_reviews = pd.read_csv("data/car_reviews.csv", sep=";")


# READ IN THE TRANSLATION TEXT FILE
# Use a context manager so the file is always closed even if an error occurs,
# and read it only once instead of read() + seek(0) + a second line-by-line pass.
with open("data/reference_translations.txt", "r", encoding="utf-8") as text_open:
    reference_text = text_open.read()

# SHOW THE RAW FILE CONTENTS
print(reference_text)

# STORE EACH LINE OF TEXT AS ONE REFERENCE TRANSLATION
translations = [line.strip() for line in reference_text.splitlines()]

# PRINT THE DATA AND EXAMINE

print(car_reviews)
print(translations)

Classify the sentiment of the reviews

# LOAD IN THE PIPELINE TO CONDUCT SENTIMENT ANALYSIS
classifier = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")

# PULL THE REVIEWS INTO A LIST (first column holds the review text);
# column slicing replaces the manual range(len(...)) index loop
car_review_list = car_reviews.iloc[:, 0].tolist()

# SUBMIT THE REVIEWS INTO THE PIPELINE
predicted_labels = classifier(car_review_list)

# CONVERT THE SENTIMENT LABEL TO A BINARY INT (1 = POSITIVE, 0 = NEGATIVE)
predictions = [1 if pred["label"] == "POSITIVE" else 0 for pred in predicted_labels]

# EVALUATE THE MODEL USING ACCURACY AND F1 SCORING
accuracy = evaluate.load("accuracy")
f1 = evaluate.load("f1")

# CREATE A LIST OF REFERENCES FOR THE TRUE LABELS (second column holds the gold label)
car_review_sentiment_references = [
    1 if label == "POSITIVE" else 0 for label in car_reviews.iloc[:, 1]
]

accuracy_result = accuracy.compute(predictions=predictions, references=car_review_sentiment_references)["accuracy"]
f1_result = f1.compute(predictions=predictions, references=car_review_sentiment_references)["f1"]

Translate the first two sentences of the first review from English to Spanish

# GRAB THE FIRST TWO SENTENCES OF THE FIRST REVIEW:
# split on ".", rejoin the first two pieces, and restore the final period
first_review_two_sentence = ".".join(car_reviews.iloc[0, 0].split(".")[:2]) + "."

# LOAD IN THE PIPELINE AND THE ASSOCIATED TRANSLATION MODEL
translator = pipeline("translation_en_to_es", model="Helsinki-NLP/opus-mt-en-es")

# TRANSLATE THE REVIEW FROM ENGLISH TO SPANISH AND STORE IT IN TRANSLATED_REVIEW
translated_review = translator(first_review_two_sentence)[0]["translation_text"]

# LOAD IN THE BLEU EVALUATOR AND ASSESS THE TRANSLATION AGAINST THE REFERENCES.
# BLEU expects one list of reference translations per prediction, so the single
# prediction is paired with the whole list of reference lines.
bleu = evaluate.load("bleu")
bleu_score = bleu.compute(predictions=[translated_review], references=[translations])
# BUG FIX: the compute result is bound to bleu_score — the original read an
# undefined name (bleu_output) here, which raised a NameError
bleu_metric = bleu_score["bleu"]

Generate an extractive Q&A instance

# EXTRACT THE SECOND REVIEW FROM THE REVIEWS DATASET TO SERVE AS THE QA CONTEXT
context = car_reviews.iloc[1, 0]

# SET UP AN EXTRACTIVE QUESTION-ANSWERING MODEL THAT PULLS ANSWERS FROM A CONTEXT
qa_model = pipeline("question-answering", model="deepset/minilm-uncased-squad2")

# THE QUESTION TO ASK ABOUT THE REVIEW
question = "What did he like about the brand?"

# RUN THE MODEL, THEN KEEP ONLY THE ANSWER SPAN FROM ITS OUTPUT DICT
qa_output = qa_model(question=question, context=context)
answer = qa_output["answer"]

Summarize the last review in the dataset to a length of ~50–55 tokens

# GRAB THE LAST REVIEW IN THE DATASET
last_review = car_reviews.iloc[-1, 0]


# GENERATE TOKENIZER AND MODEL TO DYNAMICALLY END THE SUMMARY AT THE LAST FULL SENTENCE
summary_tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
summary_model = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-large-cnn")

# TOKENIZE THE FULL REVIEW IN ONE PASS.
# The original encoded the text once just to measure its token length, then
# re-tokenized with max_length set to that same length — which is exactly
# equivalent to tokenizing once with truncation disabled.
tokenize = summary_tokenizer(last_review, return_tensors="pt", truncation=False)

# CALL THE MODEL WITH FINE-TUNED PARAMETERS. SET THE MODEL TO A MAX-LENGTH OF 55 TOKENS
review_summary = summary_model.generate(
    tokenize["input_ids"],
    attention_mask=tokenize["attention_mask"],  # pass the mask explicitly to avoid the missing-mask warning
    min_length=25,
    max_length=55,
    early_stopping=True,
    eos_token_id=summary_tokenizer.eos_token_id,
    no_repeat_ngram_size=3,
)

# DECODE THE SUMMARIZED REVIEW - GENERATION MAY STILL CUT OFF MID SENTENCE
summarized_output = summary_tokenizer.decode(review_summary[0], skip_special_tokens=True)
# FINE-TUNING STILL NOT STOPPING AT THE MOST RECENT SENTENCE END. USE REGEX TO MAKE IT DYNAMIC:
# trim each summary back to its last complete sentence (ending in . ! or ?)
summary_cleaned = []
summary_list = [summarized_output]

# ITERATE OVER ALL SUMMARIES DIRECTLY (no range(len(...)) index bookkeeping)
for summary in summary_list:
    # LOOK FOR SENTENCE-ENDING PUNCTUATION (. ? !) FOLLOWED BY WHITESPACE OR END OF STRING
    matches = list(re.finditer(r'[.!?](?=\s|$)', summary))

    if matches:
        # IF THE SUMMARY CUTS OFF, KEEP EVERYTHING UP TO THE MOST RECENT SENTENCE END
        summary_cleaned.append(summary[:matches[-1].end()])
    else:
        # OTHERWISE, KEEP THE ORIGINAL TEXT UNCHANGED
        summary_cleaned.append(summary)

# ASSIGN THE CLEANED SUMMARY TO SUMMARIZED_TEXT
summarized_text = summary_cleaned[0]