# GitHub Profile LOC Tracker
!pip install beautifulsoup4
!pip install requests
!pip install pillow
# (notebook output hidden)
# Accept username input; strip stray whitespace so the profile/API URLs
# built from it below are not corrupted by an accidental trailing space.
username = input("Enter your GitHub username: ").strip()
# (notebook output hidden)
import os
from collections import Counter
from io import BytesIO

import requests
from bs4 import BeautifulSoup
from PIL import Image, ImageDraw, ImageFont
import IPython.display
# File extensions that count as source code for the LOC tally.
# Duplicates from the original list removed (.ts/.tsx/.h/.m/.pl/.cls were
# repeated); `str.endswith` accepts a tuple, so membership behavior is the same.
includes = (
    ".py", ".pyc", ".pyo", ".pyd",
    ".java", ".class", ".jar",
    ".js", ".mjs", ".cjs", ".jsx", ".ts", ".tsx",
    ".c", ".h", ".cpp", ".cxx", ".cc", ".hpp", ".hxx",
    ".cs", ".dart", ".gd",
    ".rb", ".rake", ".gemspec",
    ".php", ".php3", ".php4", ".php5", ".phtml",
    ".swift", ".go", ".rs", ".kt", ".kts",
    ".html", ".htm", ".xhtml", ".css", ".sql",
    ".sh", ".bash", ".zsh",
    ".pl", ".pm",
    ".r", ".R", ".rdata", ".rds",
    ".m", ".mat", ".mdl",
    ".scala", ".sc",
    ".groovy", ".gvy", ".gy",
    ".asm", ".s",
    ".vb", ".vba", ".vbs",
    ".fs", ".fsx", ".fsi",
    ".lisp", ".lsp", ".cl", ".scm", ".ss", ".tcl",
    ".as", ".cls", ".trigger", ".sol", ".ino",
    ".tex", ".sty",
    ".sb", ".sb2", ".sb3",
)
# ---------------------------------------------------------------------------
# Scrape the user's repository list and count this year's lines of code.
#
# SECURITY NOTE(review): the original embedded a hard-coded GitHub personal
# access token in the source.  That token should be revoked immediately; the
# script now reads it from the GITHUB_TOKEN environment variable instead.
# ---------------------------------------------------------------------------
token = os.environ.get("GITHUB_TOKEN", "")
# Shared headers for every GitHub API request (also fixes the NameError from
# the original, which used `headers` without ever defining it).
headers = {"Authorization": f"Token {token}"} if token else {}

# Send the GET request for the "Repositories" tab of the user's profile.
html_source = requests.get(f"https://github.com/{username}?tab=repositories").text
soup = BeautifulSoup(html_source, "lxml")
# Each public, non-forked repository row on the profile page.
profile = soup.find_all("li", class_="col-12 d-flex flex-justify-between width-full py-4 border-bottom color-border-muted public source")


def get_contents(url):
    """Return the JSON contents listing for *url*, or [] on any HTTP error."""
    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        print(f"Error fetching {url}: {response.status_code}")
        return []
    return response.json()


# Only counts this year's LOC
def process_contents(contents, total):
    """Recursively walk a repo's contents tree, adding line counts to new_total.

    *total* is retained for signature compatibility; the running sum lives in
    the module-level ``new_total`` (as in the original, via ``global``).
    """
    global new_total
    for item in contents:
        if item['type'] == 'file':
            # Only count lines in recognised source-code file types
            # (e.g., .py, .js, .java — see `includes`).
            if item['name'].endswith(includes):
                file_response = requests.get(item['download_url'], headers=headers)
                if file_response.status_code == 200:
                    new_total += len(file_response.text.splitlines())
        elif item['type'] == 'dir':
            # Recursively process directories.
            process_contents(get_contents(item['url']), total)


# Check that the profile page actually returned content.
if html_source:
    repo_list = []
    new_repo_list = []
    timestamps = []
    languages = []
    new_total = 0
    for repo_item in profile:
        found_lang = repo_item.find("span", itemprop="programmingLanguage")
        if found_lang is not None:
            languages.append(found_lang.get_text())
        last_updated = repo_item.find("relative-time").text
        timestamps.append(last_updated)
        # Keep only repositories last updated in 2024.  The original compared
        # just the final character ("4"), which would also match 2014, 1994...
        if last_updated.endswith("2024"):
            new_repo_list.append(repo_item.find("a"))
    for html_tag in new_repo_list:
        repo_name = html_tag.get("href")
        url = f"https://api.github.com/repos{repo_name}/contents"
        # Start processing the repository contents.
        process_contents(get_contents(url), new_total)
    # print('You wrote ', new_total, ' lines of code this year!')
else:
    # If the request was not successful, print the status code
    print('Failed to retrieve data')
# ---------------------------------------------------------------------------
# Render the "GitHub Wrapped" summary card.
# ---------------------------------------------------------------------------
counter = Counter(languages)
# Up to five most-used languages, most common first.
fav_lang = counter.most_common()[:5]

# Background template image.  This is a public download, so no auth header is
# needed (the original passed an undefined `headers` here — a NameError).
image_url = "https://drive.usercontent.google.com/u/1/uc?id=1dS9vOVdmAe6_EVZa1foetLGcfQ2BTIYY&export=download"
response = requests.get(image_url)
img = Image.open(BytesIO(response.content))

# Get a drawing context and write the stats onto the template.
d = ImageDraw.Draw(img)
d.text((100, 50), "Github Wrapped 2024", fill="white", font_size=80)
d.text((100, 200), f"Congrats, {username}!", fill="white", font_size=30)
d.text((100, 250), f"You've written {new_total} lines of code this year!", fill="white", font_size=30)
if fav_lang:
    # Guard: a profile with no detected languages would make fav_lang[0][0]
    # raise IndexError in the original.
    d.text((100, 350), f"Your favourite programming/scripting language is {fav_lang[0][0]}", fill="white", font_size=30)
    for offset, lang_entry in enumerate(fav_lang[1:], start=1):
        d.text((100, 400 + 50 * offset), f"{lang_entry[0]}", fill="white", font_size=30)
# Use the imported module explicitly; bare `display` only exists inside a
# live notebook session.
IPython.display.display(img)