Spaces:
Build error
Build error
initial setup
Browse files- requirements.txt +2 -0
- resume_customizer.py +123 -0
- samples/additional_resume_context.txt +34 -0
- samples/cover_letter_sample.txt +11 -0
- samples/freeform_context.txt +5 -0
- samples/full_cv.yaml +219 -0
- samples/job_description.txt +96 -0
- samples/summary_sample.txt +5 -0
- utils.py +369 -0
requirements.txt
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio
|
| 2 |
+
anthropic
|
resume_customizer.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import yaml
|
| 3 |
+
import os
|
| 4 |
+
from utils import process_ksas, process_analysis, process_core_content, process_summary, _cover_letter
|
| 5 |
+
|
| 6 |
+
def load_txt_file(txt_file):
    """Return the full contents of a UTF-8 text file as a string.

    Used both to seed gr.State values from the bundled samples and as
    the upload handler for every .txt input in the UI.
    """
    with open(txt_file, 'r', encoding='utf-8') as handle:
        return handle.read()
|
| 10 |
+
|
| 11 |
+
def load_resume(uploaded_file):
    """Load and validate resume data from a YAML file.

    Args:
        uploaded_file: Either a Gradio upload object (whose ``.name``
            attribute holds the temp-file path) or a plain path string.

    Returns:
        A ``(data, message)`` tuple: ``data`` is the parsed dict, or
        ``None`` on failure; ``message`` is a human-readable status
        string suitable for surfacing in the UI.
    """
    try:
        # Both branches previously duplicated the open/parse logic;
        # resolve the path once and parse once instead.
        if hasattr(uploaded_file, 'name'):  # Handle uploaded file
            path = uploaded_file.name
        else:  # Handle string path
            path = uploaded_file
        with open(path, 'r', encoding='utf-8') as f:
            data = yaml.safe_load(f)

        # Valid YAML can parse to a scalar or a list; the downstream
        # prompt builders require a mapping.
        if not isinstance(data, dict):
            return None, f"Error: Resume data is not a dictionary (got {type(data)})"

        return data, "Successfully loaded resume data"
    except Exception as e:
        # Broad catch is deliberate: any failure (missing file, YAML
        # syntax error) must become a readable status message, not a crash.
        return None, f"Error loading resume: {e}"
|
| 26 |
+
|
| 27 |
+
def load_initial_resume(file_path):
    """Parse the bundled sample resume YAML at *file_path*.

    Returns the parsed dict, or None (with a message printed to stdout)
    when the file cannot be read or does not contain a mapping.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as handle:
            parsed = yaml.safe_load(handle)
    except Exception as e:
        print(f"Error loading initial resume: {e}")
        return None
    if isinstance(parsed, dict):
        return parsed
    print(f"Warning: Resume data is not a dictionary: {type(parsed)}")
    return None
|
| 38 |
+
|
| 39 |
+
# Gradio UI: a stepwise resume/cover-letter pipeline. Each numbered button
# runs one LLM stage (from utils); outputs are editable Textboxes whose
# .change handlers mirror the (possibly hand-edited) text into gr.State
# values consumed by later stages.
with gr.Blocks() as demo:
    gr.Markdown('''# Resume Generator

This tool generates a tailored resume and cover letter for a job description using Anthropic's LLM API.
Instead of doing this in one shot, each step of the process is broken down into separate blocks to allow for manual edits between steps. Edit the outputs as you go to align with historical accuracy and with your writing preferences.
Sample files have been provided for each input to help you get started. You can upload your own files to replace the samples.

## Instructions
1. Ensure you have your Anthropic API Key added to your HF account secrets.
2. Upload all of the following files:
- Job Description: a .txt file containing the job description.
- Resume YAML: a .yaml file containing your work experience. See the provided template for formatting structure. Work experience bullets are grouped by job and category, where multiple variations of bullets may be stored to provide additional texture to the LLM.
- Additional Resume Context: a .txt file containing additional resume details that you don't want to tailor, but want to share with the LLM as extra context.
- Additional Freeform Context: a .txt file that elaborates on resume details to support the LLM. This information wouldn't normally be included on the resume. It can be unpolished writing and brutally honest background details that prevent the LLM from stretching the truth.
- Resume Summary Writing Sample: a .txt file containing a writing sample of a bullet list summary for the top of your resume.
- Cover Letter Writing Sample: a .txt file containing a writing sample of a cover letter.
''')

    ### File management

    # File pickers pre-populated with the bundled sample files.
    # NOTE(review): values are bare filenames, not samples/ paths — presumably
    # the Space copies samples into the working dir at build time; confirm.
    job_desc_file = gr.File(label="Job Description", value="job_description.txt")
    resume_file = gr.File(label="Resume YAML", value="full_cv.yaml")
    extra_resume_context_file = gr.File(label="Additional Resume Context", value="additional_resume_context.txt")
    freeform_context_file = gr.File(label="Additional Freeform Context", value="freeform_context.txt")
    summary_sample_file = gr.File(label="Resume Summary Writing Sample", value="summary_sample.txt")
    cover_letter_sample_file = gr.File(label="Cover Letter Writing Sample", value="cover_letter_sample.txt")

    # Create States that store content as variables.
    # These initial loads run at import time, so the sample files must exist.
    job_desc_content = gr.State(load_txt_file("job_description.txt"))
    resume_content = gr.State(load_initial_resume("full_cv.yaml"))
    extra_resume_context_content = gr.State(load_txt_file("additional_resume_context.txt"))
    freeform_context_content = gr.State(load_txt_file("freeform_context.txt"))
    summary_sample_content = gr.State(load_txt_file("summary_sample.txt"))
    cover_letter_sample_content = gr.State(load_txt_file("cover_letter_sample.txt"))

    # If a file is uploaded, automatically update the respective variable storing its contents.
    job_desc_file.upload(fn=load_txt_file, inputs=[job_desc_file], outputs=[job_desc_content])
    # NOTE(review): load_resume returns a (data, message) 2-tuple but only one
    # output is wired here — the status message is dropped or may misroute the
    # tuple into resume_content; verify against Gradio's output-unpacking rules.
    resume_file.upload(fn=load_resume, inputs=[resume_file], outputs=[resume_content])
    extra_resume_context_file.upload(fn=load_txt_file, inputs=[extra_resume_context_file], outputs=[extra_resume_context_content])
    freeform_context_file.upload(fn=load_txt_file, inputs=[freeform_context_file], outputs=[freeform_context_content])
    summary_sample_file.upload(fn=load_txt_file, inputs=[summary_sample_file], outputs=[summary_sample_content])
    cover_letter_sample_file.upload(fn=load_txt_file, inputs=[cover_letter_sample_file], outputs=[cover_letter_sample_content])

    # Step 1: Extract KSAs (knowledge, skills, abilities) from the job description.
    ksas_content = gr.State(None)
    ksas_btn = gr.Button("1. Extract KSAs")
    ksas_output = gr.Textbox(label="Extracted KSAs", lines=10, interactive=True)
    ksas_btn.click(process_ksas, inputs=[job_desc_content], outputs=[ksas_output])
    # Identity .change handler mirrors manual edits back into the State.
    ksas_output.change(lambda x: x, ksas_output, ksas_content)

    # Step 2: Analyze fit first to determine whether or not to proceed.
    analyze_btn = gr.Button("2. Analyze Fit")
    analysis_output = gr.Textbox(label="Analysis & Recommendation", lines=10, interactive=True)
    analyze_btn.click(process_analysis, inputs=[resume_content, job_desc_content, ksas_content, freeform_context_content], outputs=analysis_output)

    # Step 3: Generate core resume content (experience bullets and technical skills).
    core_content_btn = gr.Button("3. Generate Core Content")

    exp_content = gr.State(None)
    exp_output = gr.Textbox(label="Experience Bullets", lines=10, interactive=True)
    exp_output.change(lambda x: x, exp_output, exp_content)

    skills_content = gr.State(None)
    skills_output = gr.Textbox(label="Technical Skills", lines=5, interactive=True)
    skills_output.change(lambda x: x, skills_output, skills_content)

    core_content_btn.click(process_core_content, inputs=[resume_content, job_desc_content, ksas_content, freeform_context_content], outputs=[exp_output, skills_output])

    # Step 4: Generate summary from the (possibly edited) earlier outputs.
    summary_content = gr.State(None)
    summary_btn = gr.Button("4. Generate Summary")
    summary_output = gr.Textbox(label="Summary", lines=8, interactive=True)
    summary_btn.click(process_summary, inputs=[job_desc_content, ksas_content, exp_content, skills_content, extra_resume_context_content, summary_sample_content], outputs=[summary_output])
    summary_output.change(lambda x: x, summary_output, summary_content)

    # Step 5: Generate cover letter, styled after the provided writing sample.
    cover_letter_content = gr.State(None)
    cover_letter_btn = gr.Button("5. Generate Cover Letter")
    cover_letter_output = gr.Textbox(label="Cover Letter", lines=10, interactive=True)
    cover_letter_btn.click(_cover_letter, inputs=[job_desc_content, ksas_content, exp_content, skills_content, extra_resume_context_content, summary_content, cover_letter_sample_content], outputs=[cover_letter_output])
    cover_letter_output.change(lambda x: x, cover_letter_output, cover_letter_content)


if __name__ == "__main__":
    # debug=True surfaces tracebacks in the Gradio UI/console.
    demo.launch(debug=True)
|
samples/additional_resume_context.txt
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
OTHER PROFESSIONAL EXPERIENCE
|
| 2 |
+
|
| 3 |
+
Data Analyst (Co-op)
|
| 4 |
+
ABC Foods
|
| 5 |
+
Aug 2012 – Dec 2012
|
| 6 |
+
|
| 7 |
+
Project Analyst (Co-op)
|
| 8 |
+
A1 Grocery
|
| 9 |
+
Jan 2012 – Apr 2012
|
| 10 |
+
|
| 11 |
+
Accounting Assistant (Co-op)
|
| 12 |
+
ReloCorp
|
| 13 |
+
May 2011 – Aug 2011
|
| 14 |
+
|
| 15 |
+
EDUCATION
|
| 16 |
+
|
| 17 |
+
Master of Management in Operations Research
|
| 18 |
+
Sep 2015 – Dec 2016
|
| 19 |
+
|
| 20 |
+
Bachelor of Mathematics
|
| 21 |
+
Sep 2010 – Jun 2015
|
| 22 |
+
Operations Research, Statistics Minor, Co-op, Dean’s Honours List
|
| 23 |
+
|
| 24 |
+
ADDITIONAL ASSETS
|
| 25 |
+
|
| 26 |
+
Microsoft Certified: Azure Data Scientist Associate
|
| 27 |
+
Aug 2024
|
| 28 |
+
- Validated expertise in designing, implementing, and monitoring machine learning solutions in Azure.
|
| 29 |
+
|
| 30 |
+
Toastmasters Club
|
| 31 |
+
Sep 2018 – Sep 2024
|
| 32 |
+
- Engage weekly to enhance public speaking, impromptu speaking, and leadership skills
|
| 33 |
+
- Held the role of Vice President of Education (2019/2020), managing the club’s educational program and guiding new members
|
| 34 |
+
- Served as President (2020/2021), leading the executive team in executing the Club Success Plan and driving club growth
|
samples/cover_letter_sample.txt
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Dear Hiring Manager,
|
| 2 |
+
|
| 3 |
+
I’m writing to express my keen interest in the Senior Data Scientist position. Having significantly contributed to the data science practice at Workers' Compensation Board, particularly within the realm of prescriptive modelling, I’m excited about the opportunity this role presents.
|
| 4 |
+
|
| 5 |
+
Recently I led a critical three-sprint action recommendation initiative, marking a notable advancement in our practice and showcasing the impactful contributions I can offer in a senior capacity. This initiative's success is a testament to the value I can add as a Senior Data Scientist.
|
| 6 |
+
|
| 7 |
+
Throughout my career, advisory roles have naturally gravitated towards me, allowing me to offer insights and guidance beyond my formal job descriptions. This has been increasingly evident at Workers' Compensation Board. My time at Health Authority reinforces this, where I was a key advisor within the Data & Analytics department and led major analytics initiatives, including a chemotherapy scheduling optimization model now integral to a web application used across facilities.
|
| 8 |
+
|
| 9 |
+
My goal is to lead a technical team in high-value projects that drive decision-making at all organizational levels. I believe this role aligns perfectly with my career progression. I’m particularly passionate about harnessing the power of models for positive impact, a vision that resonates with Megacorp’s values.
|
| 10 |
+
|
| 11 |
+
I’m eager to discuss how my experience and perspective can contribute to the continued success and innovation of the team. Thank you for considering my application.
|
samples/freeform_context.txt
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
INFORMAL FREEFORM CONTEXT
|
| 2 |
+
- I don’t know if I have direct experience with economics or econometrics. The closest thing I can think of is working with causal machine learning and experimentation at Workers' Compensation Board. I don’t know if that is valid experience or if that’s what they are looking for. I might need help workshopping this.
|
| 3 |
+
- I don’t have direct experience with marketplace or incentive design. The only thing I can think of is with optimization and thinking of good objective functions to optimize towards, whether it’s a tangible resource like cost, or causally inferring something like the duration of a claim. I think this could be a weak point too. I really can’t even think of indirect experience with these areas.
|
| 4 |
+
- I’ve never worked on real-world problems applying economic analysis
|
| 5 |
+
- I have tons of mentoring and leadership experience. At Workers' Compensation Board I led the prescriptive analytics initiative where there were 6 team members and I coordinated the work, as well as drove it forward myself. At Health Authority as a Sr Business Analyst, I also led a team of analysts for the redevelopment projects. I’ve often been in instances where I mentored and led junior analysts.
|
samples/full_cv.yaml
ADDED
|
@@ -0,0 +1,219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
experience:
|
| 2 |
+
- company: "Workers' Compensation Board"
|
| 3 |
+
title: "Data Scientist"
|
| 4 |
+
dates: "2022-Present"
|
| 5 |
+
bullets:
|
| 6 |
+
- headline: "Led department's prescriptive analytics initiative for claims intervention optimization"
|
| 7 |
+
variations:
|
| 8 |
+
- "Spearheaded a department initiative to develop MVP action recommendation models, including the creation of automated processes, standardization of patterns and templates, and the implementation of robust evaluation and output surfacing techniques"
|
| 9 |
+
- "Guided a new prescriptive analytics initiative to recommend optimal interventions, applying causal inference on past experiment data to identify which claims would most benefit from early return-to-work planning"
|
| 10 |
+
- "Championed causal machine learning modeling to recommend optimal interventions in claims management, improving decision-making processes and driving significant business impact"
|
| 11 |
+
- "Led the department's new prescriptive analytics initiative to apply causal inference and machine learning techniques to identify the best interventions for a decision in a claim's life cycle"
|
| 12 |
+
- "Incorporated a risk-adjusted strategy to ensure recommended actions balanced potential gains against model uncertainty, drawing inspiration from mean-variance approaches"
|
| 13 |
+
|
| 14 |
+
- headline: "Led design and implementation of claims workload allocation tool"
|
| 15 |
+
variations:
|
| 16 |
+
- "Led the design and implementation of a workload allocation tool for return-to-work planning, utilizing causal machine learning and optimization techniques, projected to shorten claim durations by an average of five days per person"
|
| 17 |
+
- "Led the design of a workload allocation tool for return-to-work planning for injured workers expected to shorten the duration of claims by an average of 5 days per person"
|
| 18 |
+
- "Developed risk-adjusted workload allocation strategies balancing potential gains against model uncertainty, drawing inspiration from mean-variance approaches"
|
| 19 |
+
- "Led the design and implementation of a workload allocation tool for return-to-work planning, utilizing optimization techniques projected to reduce claim durations by an average of 5 days"
|
| 20 |
+
|
| 21 |
+
- headline: "Managed and deployed production machine learning systems"
|
| 22 |
+
variations:
|
| 23 |
+
- "Oversaw the maintenance and security of 10+ automated models within MS Azure Machine Learning"
|
| 24 |
+
- "Deployed multiple machine learning models into production using Microsoft Azure Machine Learning, ensuring continuous improvement and operational efficiency"
|
| 25 |
+
- "Designed, developed, and deployed multiple machine learning models using Python and Azure Machine Learning, improving decision-making processes in claims management"
|
| 26 |
+
- "Deployed multiple machine learning models with Azure ML, overseeing updates and feature adjustments to maintain alignment with evolving business needs"
|
| 27 |
+
- "Adapted an existing multi-label deep learning model used for claims coding and retrained it on COVID-19 claims to further accelerate manual claims coding efforts"
|
| 28 |
+
- "Refined and adapted a multi-label deep learning model for efficient COVID-19 claims coding, significantly improving manual coding processes"
|
| 29 |
+
|
| 30 |
+
- headline: "Developed experimentation platform and RAG application"
|
| 31 |
+
variations:
|
| 32 |
+
- "Led the development of an experimentation application to reduce overhead challenges in conducting business experiments to test new processes"
|
| 33 |
+
- "Developed a RAG application to enable a chat interface to chat with our department's measures and solutions data, greatly reducing time to finding answers to data questions"
|
| 34 |
+
- "Initiated the first instance of ongoing experimentation for return-to-work planning"
|
| 35 |
+
- "Led the development of an experimentation platform to streamline business experiments, reducing overhead and accelerating the testing of new processes"
|
| 36 |
+
|
| 37 |
+
- headline: "Led team mentorship and cross-organizational collaboration"
|
| 38 |
+
variations:
|
| 39 |
+
- "Mentored team members to help them support the prescriptive analytics initiatives to deliver on department goals"
|
| 40 |
+
- "Regularly communicate with business stakeholders to ensure alignment of work and goals"
|
| 41 |
+
- "Enhanced the collaborative relationship between the data science team and Claims & Rehabilitation, leading monthly data science sessions for effective project feedback"
|
| 42 |
+
- "Conducted biweekly research on emerging tools and techniques, sharing insights to drive collective growth and innovation within the data science team"
|
| 43 |
+
- "Assisted analytics teams at other workers' compensation boards in Canada by sharing our team's proven strategies and solutions, enhancing their capabilities in advanced analytics"
|
| 44 |
+
- "Mentored and led junior analysts, fostering team growth and skill development, enhancing the team's capability to deliver on department goals"
|
| 45 |
+
- "Translated complex analytical insights to technical and non-technical stakeholders, ensuring alignment with business objectives"
|
| 46 |
+
- "Collaborated with stakeholders across departments to align machine learning solutions with business goals, enhancing the impact of analytics projects"
|
| 47 |
+
- "Provided expert consulting in advanced analytics to internal teams, facilitating data-driven decision-making across the organization"
|
| 48 |
+
- "Communicated model insights and potential business impacts to senior leaders and managers, securing buy-in for integrating analytics-driven recommendations"
|
| 49 |
+
|
| 50 |
+
- headline: "Applied advanced analytics and causal inference techniques"
|
| 51 |
+
variations:
|
| 52 |
+
- "Collaborated with stakeholders to build a causal machine learning recommendation model for physiotherapy extension decisions which informed the department approach to prescriptive modelling"
|
| 53 |
+
- "Engineered and automated a causal machine learning model for physiotherapy decisions, significantly enhancing the department's approach to prescriptive modelling"
|
| 54 |
+
- "Applied econometric and causal inference techniques using Python's DoWhy and econml packages on observational data, accounting for bias and enhancing model reliability"
|
| 55 |
+
- "Applied causal machine learning techniques using Python's DoWhy and econml packages on experimental and observational datasets to support organizational decision-making"
|
| 56 |
+
- "Utilized Python-based causal ML frameworks to analyze observational and experimental data. Ensured outputs were understandable and actionable, bridging technical models with business-friendly explanations"
|
| 57 |
+
- company: "Health Authority"
|
| 58 |
+
title: "Senior Business Analyst"
|
| 59 |
+
dates: "2018-2022"
|
| 60 |
+
bullets:
|
| 61 |
+
- headline: "Led chemotherapy scheduling optimization initiatives"
|
| 62 |
+
variations:
|
| 63 |
+
- "Validated, improved and implemented a chemotherapy scheduling optimization model designed for a decision support web application with clinical and operational leads"
|
| 64 |
+
- "Developed and implemented a CPLEX chemotherapy patient scheduling constraint programming model, in tandem with clinical and operational stakeholders, that maximizes staff utilization and reduces manual scheduling labour"
|
| 65 |
+
- "Created and deployed a chemotherapy scheduling optimization model, aligning with clinical and operational leads to boost staff efficiency and minimize manual scheduling"
|
| 66 |
+
- "Developed and integrated optimization models into clinical decision-support tools for chemotherapy scheduling. Prioritized objectives—staff capacity, fairness, safety, and patient preferences—and balanced these constraints within a single framework"
|
| 67 |
+
|
| 68 |
+
- headline: "Developed critical healthcare infrastructure forecasting methodologies"
|
| 69 |
+
variations:
|
| 70 |
+
- "Formulated, presented and documented a forecasting methodology for BC Cancer to use for budget and infrastructure planning that was approved by leadership and the BC Ministry of Health"
|
| 71 |
+
- "Created long-term demand projections for critical cancer treatments and infrastructure needs, securing leadership and ministry-level approvals. These forecasts influenced multimillion-dollar capital planning decisions and ensured the province met future patient demand"
|
| 72 |
+
- "Built forecasting models (e.g., auto ARIMA, other statistical methods) to guide strategic planning. Translated technical forecasts into clear narratives for executives, ensuring evidence-based policy and infrastructure investment decisions"
|
| 73 |
+
- "Devised and secured approval for a new forecasting methodology for infrastructure planning, contributing to more accurate resource allocation and securing over $1B in funding"
|
| 74 |
+
|
| 75 |
+
- headline: "Led analytics team for capital planning and infrastructure projects"
|
| 76 |
+
variations:
|
| 77 |
+
- "Led a team of analysts to forecast capital requirements for multiple cancer centre redevelopment projects and expansion initiatives, a collaborative effort between our analytics team and program stakeholders"
|
| 78 |
+
- "Directed an analyst team in forecasting capital needs for cancer center redevelopment, collaborating with key stakeholders"
|
| 79 |
+
- "Led a team of two analysts in forecasting capital requirements for cancer center redevelopment projects, collaborating closely with program stakeholders"
|
| 80 |
+
- "Led a team of analysts in forecasting capital requirements for multiple cancer center redevelopment projects, securing over $1B in funding"
|
| 81 |
+
|
| 82 |
+
- headline: "Implemented healthcare optimization and ML models"
|
| 83 |
+
variations:
|
| 84 |
+
- "Designed and implemented an integer programming optimization model in GAMS to inform the deployment order for a new clinical information system across BC Cancer centres"
|
| 85 |
+
- "Developed supervised machine learning models for predicting PET scan times, identifying and addressing clinical bottlenecks"
|
| 86 |
+
- "Created supervised machine learning models to predict PET scan times, identifying clinical bottlenecks and improving patient throughput"
|
| 87 |
+
- "Developed and implemented constraint programming and integer programming models using CPLEX and GAMS for chemotherapy patient scheduling and clinical information system deployment"
|
| 88 |
+
|
| 89 |
+
- headline: "Led COVID-19 PPE supply chain modeling"
|
| 90 |
+
variations:
|
| 91 |
+
- "Designed supply and demand models forecasting PPE inventory requirements at the onset of the COVID-19 pandemic to assist decisions made by supply chain colleagues"
|
| 92 |
+
- "Engineered models to forecast PPE inventory requirements at the onset of COVID-19, supporting critical supply chain decisions"
|
| 93 |
+
- "Engineered models to forecast PPE inventory requirements during COVID-19, supporting critical supply chain decisions"
|
| 94 |
+
- "Engineered models to forecast PPE inventory requirements during COVID-19 and projected demand for services, aiding in critical supply chain and infrastructure decisions"
|
| 95 |
+
|
| 96 |
+
- headline: "Automated data processes and reporting"
|
| 97 |
+
variations:
|
| 98 |
+
- "Programmed R, Python and SQL scripts to automate common data requests, leveraging data science packages such as tidyverse, NumPy and Pandas"
|
| 99 |
+
- "Collaborated with the cancer surgery program by producing reports using data from a surgical patient registry"
|
| 100 |
+
- "Automated data requests by developing scripts in R, Python, and SQL, reducing data retrieval time and enabling faster decision-making"
|
| 101 |
+
- "Automated complex data workflows using Python, R, and SQL to expedite analyses and reduce manual effort"
|
| 102 |
+
- company: "Health Authority"
|
| 103 |
+
title: "Operations Research Project Analyst"
|
| 104 |
+
dates: "2016-2017"
|
| 105 |
+
bullets:
|
| 106 |
+
- headline: "Led radiation therapy scheduling optimization initiatives"
|
| 107 |
+
variations:
|
| 108 |
+
- "Led the design and development of a GAMS optimization model for radiation therapy scheduling that improved patient satisfaction of appointment time bookings"
|
| 109 |
+
- "Led the design and development of a radiation therapy scheduling optimization model, achieving a 10% increase in patient appointment time preferences met"
|
| 110 |
+
- "Designed and developed optimization models for radiation therapy scheduling, improving patient appointment satisfaction and resource utilization"
|
| 111 |
+
- "Developed optimization models for radiation therapy scheduling, improving patient appointment satisfaction and resource utilization"
|
| 112 |
+
- "Designed and developed optimization models for radiation therapy scheduling, improving patient appointment satisfaction by 10%"
|
| 113 |
+
- "Designed and validated optimization and simulation models for radiation therapy scheduling. Improved patient satisfaction by enhancing alignment between resource availability and appointment time preferences"
|
| 114 |
+
|
| 115 |
+
- headline: "Developed and validated simulation models for scheduling analysis"
|
| 116 |
+
variations:
|
| 117 |
+
- "Programmed and validated a simulation model that tested the performance of different scheduling heuristics and optimization scheduling models for radiation therapy treatment"
|
| 118 |
+
- "Developed and validated a simulation model to assess various scheduling policies for radiation therapy, comparing heuristics and optimization models"
|
| 119 |
+
- "Programmed and validated simulation models to test the performance of different scheduling heuristics and optimization models, enhancing operational efficiency"
|
| 120 |
+
- "Programmed and validated simulation models to test the performance of different scheduling heuristics and optimization methods"
|
| 121 |
+
- "Programmed and validated simulation models to assess scheduling policies for radiation therapy, enhancing operational efficiency"
|
| 122 |
+
- "Tested alternative scheduling policies using simulation, informing clinical teams about operational trade-offs and potential efficiency gains"
|
| 123 |
+
|
| 124 |
+
- headline: "Maintained production decision support systems"
|
| 125 |
+
variations:
|
| 126 |
+
- "Performed maintenance on decision support applications currently in production for clinical stability"
|
| 127 |
+
- company: "ABC Consulting"
|
| 128 |
+
title: "Project Analyst"
|
| 129 |
+
dates: "2016"
|
| 130 |
+
bullets:
|
| 131 |
+
- headline: "Led mining operations optimization project"
|
| 132 |
+
variations:
|
| 133 |
+
- "Collaborated with mining experts to improve the productivity of their gold mine"
|
| 134 |
+
- "Identified bottlenecks within the haul truck transport operations using co-ordinates data of the trucks"
|
| 135 |
+
- "Designed a staggered employee shift schedule that could increase production by 1.2M tonnes annually"
|
| 136 |
+
|
| 137 |
+
- headline: "Developed technical documentation and analysis tools"
|
| 138 |
+
variations:
|
| 139 |
+
- "Produced SQL scripts for the client to reproduce the analysis in the future"
|
| 140 |
+
- "Authored an extensive technical report to highlight the team's findings and recommendations"
|
| 141 |
+
- company: "Food Manufacturer"
|
| 142 |
+
title: "Operations Research Analyst"
|
| 143 |
+
dates: "2014-2015"
|
| 144 |
+
bullets:
|
| 145 |
+
- headline: "Developed automated shipment optimization solution"
|
| 146 |
+
variations:
|
| 147 |
+
- "Researched, designed and implemented a tabu search heuristic to solve an optimization model that automated the weekly process of packing 300+ US customer orders into trucks and scheduling delivery appointment times at minimum cost"
|
| 148 |
+
- "Innovatively applied a tabu search heuristic to automatically optimize the packing and scheduling of 300+ US customer orders weekly, significantly reducing costs"
|
| 149 |
+
- "Implemented a tabu search heuristic to automate the weekly packing and scheduling of over 300 U.S. customer orders into trucks, reducing costs and manual planning efforts"
|
| 150 |
+
- "Created a tabu search heuristic to automate packing and scheduling of over 300 weekly U.S. customer orders, minimizing transportation costs and manual planning effort"
|
| 151 |
+
|
| 152 |
+
- headline: "Led stakeholder consultation and process analysis"
|
| 153 |
+
variations:
|
| 154 |
+
- "Consulted with transportation experts to understand how the shipment load plan for trucks was manually completed"
|
| 155 |
+
- "Engaged with stakeholders to analyze the manual shipment load planning, laying the groundwork for optimization solutions"
|
| 156 |
+
- "Consulted with transportation experts to understand and improve manual shipment load planning processes"
|
| 157 |
+
|
| 158 |
+
- headline: "Automated operational reporting"
|
| 159 |
+
variations:
|
| 160 |
+
- "Automated reports for operations managers to monitor the utilization of production lines"
|
| 161 |
+
- "Automated operational reports for managers using Excel VBA and SQL"
|
| 162 |
+
- company: "Government Agency"
|
| 163 |
+
title: "Research Analyst (Co-op)"
|
| 164 |
+
dates: "Earlier Position"
|
| 165 |
+
bullets:
|
| 166 |
+
- headline: "Developed military training optimization solution"
|
| 167 |
+
variations:
|
| 168 |
+
- "Developed and programmed a genetic algorithm to solve an optimization model in MATLAB for allocating training devices at offices and scheduling training"
|
| 169 |
+
- "Formulated and programmed a genetic algorithm for optimizing training device allocation and training scheduling at multiple offices"
|
| 170 |
+
|
| 171 |
+
- headline: "Created technical documentation"
|
| 172 |
+
variations:
|
| 173 |
+
- "Documented all relevant information for further team research"
|
| 174 |
+
technical_skills:
|
| 175 |
+
- category: "Programming & Data Analytics"
|
| 176 |
+
skills:
|
| 177 |
+
- "Python"
|
| 178 |
+
- "R"
|
| 179 |
+
- "SQL"
|
| 180 |
+
- "Java"
|
| 181 |
+
- "C++"
|
| 182 |
+
- "MATLAB"
|
| 183 |
+
- "Excel VBA"
|
| 184 |
+
|
| 185 |
+
- category: "Machine Learning & Causal Inference"
|
| 186 |
+
skills:
|
| 187 |
+
- "scikit-learn"
|
| 188 |
+
- "PyTorch"
|
| 189 |
+
- "DoWhy"
|
| 190 |
+
- "econml"
|
| 191 |
+
- "pandas"
|
| 192 |
+
- "NumPy"
|
| 193 |
+
|
| 194 |
+
- category: "Optimization & Complex Modeling"
|
| 195 |
+
skills:
|
| 196 |
+
- "Gurobi"
|
| 197 |
+
- "CPLEX"
|
| 198 |
+
- "GAMS"
|
| 199 |
+
- "Google OR-Tools"
|
| 200 |
+
- "IBM Optimization Studio"
|
| 201 |
+
|
| 202 |
+
- category: "Simulation Software"
|
| 203 |
+
skills:
|
| 204 |
+
- "Arena"
|
| 205 |
+
- "@RISK"
|
| 206 |
+
- "Simio"
|
| 207 |
+
|
| 208 |
+
- category: "Data Visualization"
|
| 209 |
+
skills:
|
| 210 |
+
- "Tableau"
|
| 211 |
+
- "Power BI"
|
| 212 |
+
- "matplotlib"
|
| 213 |
+
- "seaborn"
|
| 214 |
+
|
| 215 |
+
- category: "Cloud & Development Tools"
|
| 216 |
+
skills:
|
| 217 |
+
- "Microsoft Azure Machine Learning"
|
| 218 |
+
- "Git"
|
| 219 |
+
|
samples/job_description.txt
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Instacart
|
| 2 |
+
Sr. Data Scientist
|
| 3 |
+
We're transforming the grocery industry
|
| 4 |
+
|
| 5 |
+
At Instacart, we invite the world to share love through food because we believe everyone should have access to the food they love and more time to enjoy it together. Where others see a simple need for grocery delivery, we see exciting complexity and endless opportunity to serve the varied needs of our community. We work to deliver an essential service that customers rely on to get their groceries and household goods, while also offering safe and flexible earnings opportunities to Instacart Personal Shoppers.
|
| 6 |
+
|
| 7 |
+
Instacart has become a lifeline for millions of people, and we’re building the team to help push our shopping cart forward. If you’re ready to do the best work of your life, come join our table.
|
| 8 |
+
|
| 9 |
+
Instacart is a Flex First team
|
| 10 |
+
|
| 11 |
+
There’s no one-size fits all approach to how we do our best work. Our employees have the flexibility to choose where they do their best work—whether it’s from home, an office, or your favorite coffee shop—while staying connected and building community through regular in-person events. Learn more about our flexible approach to where we work.
|
| 12 |
+
|
| 13 |
+
OVERVIEW
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
About the Role
|
| 18 |
+
|
| 19 |
+
This is a general posting for multiple Sr. Data Science roles open across our 4-sided marketplace. You’ll get the chance to learn about the different problems the Data Science team solves as you go through the process. Towards the end of your process, we’ll do a team-matching exercise to determine which of the open roles/teams you’ll join. You can find a blurb on each team at the bottom of this page.
|
| 20 |
+
|
| 21 |
+
With regards to level, we have flexibility. Your level will be determined by your education, years of experience, and most importantly, how you perform throughout the interview process. If you're targeting a senior title, you'll need to prove to the team that you're operating at that level.
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
About the Team
|
| 26 |
+
|
| 27 |
+
You will be joining a growing data science team and will tackle some of the most challenging and impactful problems that are transforming how people buy groceries every day. You will be embedded within our data-driven product team as a trusted partner in uncovering barriers in the product’s usability and utilize these insights to inform product improvements that drive angle-changing growth. We’re looking for a self-driven, strategic thinker who can hit the ground running to ultimately influence decision-making across the entire organization.
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
ABOUT THE JOB
|
| 32 |
+
|
| 33 |
+
Own analytical frameworks that guide the product roadmap
|
| 34 |
+
Design rigorous experiments and interpret results to draw detailed and actionable conclusions
|
| 35 |
+
Develop statistical models to extract trends, measure results, and predict future performance of our product
|
| 36 |
+
Build simulations to project impact of various product and policy interventions
|
| 37 |
+
Enable objective decision making across the company by democratizing data through dashboards and other analytical tools
|
| 38 |
+
Use expertise in causal inference, machine learning, complex systems modeling, behavioral decision theory etc. to shape the future of Instacart
|
| 39 |
+
Present findings in a compelling way to influence Instacart’s leadership
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
ABOUT YOU
|
| 43 |
+
|
| 44 |
+
Minimum Qualifications
|
| 45 |
+
|
| 46 |
+
6+ years experience working in a quantitative role at a product company or a research organization
|
| 47 |
+
Ability to run rigorous experiments and come up with scientifically sound recommendations
|
| 48 |
+
Ability to write complex, efficient, and eloquent SQL queries to extract data
|
| 49 |
+
Ability to write efficient and eloquent code in Python or R
|
| 50 |
+
A desire to build and improve consumer software products
|
| 51 |
+
Ability to translate business needs into analytical frameworks
|
| 52 |
+
Eagerness to learn, flexibility to pivot when needed, savviness to navigate and thrive in a dynamic environment, and a growth mindset needed to build a successful team and company
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
Preferred Qualifications
|
| 56 |
+
|
| 57 |
+
Awareness of business trade offs when working on a multi-sided marketplace
|
| 58 |
+
Confidence in collaborating with and influencing cross-functional stakeholders (i.e. Product, Engineering), at a senior level
|
| 59 |
+
MS/PhD in Statistics, Economics, Applied Mathematics, or a similar field
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
OPEN ROLES:
|
| 63 |
+
|
| 64 |
+
Growth & Marketing Tech
|
| 65 |
+
|
| 66 |
+
Instacart's marketing data science team serves as thought partners and problem solvers for the Growth and Marketing orgs, supporting our broader mission to give everyone access to the food they love. We are responsible for understanding the end to end scope of growth marketing, including how marketing shapes and impacts customer behavior both today and in the long run, the efficiency and performance of any marketing technology we utilize, and what the highest ROI investments are in both marketing and marketing technology. We synthesize our insights and knowledge into actionable tactics, optimizations, and strategy within marketing.
|
| 67 |
+
|
| 68 |
+
Marketplace Balancing & Coordination
|
| 69 |
+
|
| 70 |
+
We are looking for an experienced data scientist with a background in complex and dynamic marketplace problems. The successful candidate will take on a leading role working on Instacart’s core marketplace problems including shopper pay and incentives, marketplace balancing, and experimentation. The role will partner with senior product managers and engineers to tackle analytical, experimental, and inferential problems to understand and expand the efficient frontiers of a balanced, sustainable, and resilient multi-sided marketplace.
|
| 71 |
+
|
| 72 |
+
New Bets
|
| 73 |
+
|
| 74 |
+
The New Bets Data Science team exists to provide data-driven insights and strategic guidance around Instacart’s most promising emerging initiatives, collectively known as "new bets”. You will play a critical role in supporting these exciting new bets, including Instacart Business and Health, by establishing data foundations, discovering new opportunities, and evaluating success using your business judgement and experimentation. The position requires a scrappy, "startup-like" mindset, as you will partner closely with self-directed teams forming new business lines all while leveraging the knowledge and systems of Instacart's established marketplace.
|
| 75 |
+
|
| 76 |
+
Order Experience
|
| 77 |
+
|
| 78 |
+
We are seeking a Senior Data Scientist to join our Order Experience team at Instacart, with a keen focus on enhancing consumer interactions through strategic data-driven insights. The ideal candidate should be passionate about creating a seamless user experience, focusing on optimizing our critical surfaces such as cart and checkout, and playing a significant role in strategic initiatives like Family Account features. This role requires strong product-facing data science experience, strategic thinking, and adeptness at collaborating with stakeholders to translate complex data into actionable product recommendations.
|
| 79 |
+
|
| 80 |
+
Retailer Storefronts
|
| 81 |
+
|
| 82 |
+
Retailer Storefronts builds white label storefronts for retail partners and is committed to being the best digital enablement platform for our retailers, both online and in-store. DSA supports Retailer Storefronts by leveraging observational data to help drive product and marketing strategies that grow our and our retailers' businesses.
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
#LI-Remote
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
Instacart provides highly market-competitive compensation and benefits in each location where our employees work. This role is remote and the base pay range for a successful candidate is dependent on their permanent work location. Please review our Flex First remote work policy here. Currently, we are only hiring in the following provinces: Ontario, Alberta and British Columbia.
|
| 90 |
+
|
| 91 |
+
Offers may vary based on many factors, such as candidate experience and skills required for the role. Additionally, this role is eligible for a new hire equity grant as well as annual refresh grants. Please read more about our benefits offerings here.
|
| 92 |
+
|
| 93 |
+
For Canadian based candidates, the base pay ranges for a successful candidate are listed below.
|
| 94 |
+
|
| 95 |
+
CAN
|
| 96 |
+
$158,000—$208,000 CAD
|
samples/summary_sample.txt
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Offers 10 years of diverse analytics experience, with 8+ of these years having a strong focus on advanced analytics, predictive and prescriptive modelling
|
| 2 |
+
Expert in prescriptive modelling techniques, including optimization and simulation
|
| 3 |
+
Experienced in leading analytics teams to deliver projects on schedule
|
| 4 |
+
Proven capability in stakeholder collaboration across all levels of an organization to drive critical projects and decisions
|
| 5 |
+
Enhanced communication and presentation skills through Toastmasters experience
|
utils.py
ADDED
|
@@ -0,0 +1,369 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import json
import os

from anthropic import Anthropic
| 4 |
+
def process_ksas(job_description):
|
| 5 |
+
ksas = _ksas(job_description)
|
| 6 |
+
job_duties, req_quals, pref_quals = ksas['job_duties'], ksas['required_qualifications'], ksas['preferred_qualifications']
|
| 7 |
+
ksas_str = 'Job Duties\n' + '\n'.join(job_duties) + '\nRequired Qualifications\n' + '\n'.join(req_quals) + '\nPreferred Qualifications\n' + '\n'.join(pref_quals)
|
| 8 |
+
return ksas_str
|
| 9 |
+
|
| 10 |
+
def _ksas(job_description):
|
| 11 |
+
prompt = _ksa_extraction_prompt(job_description)
|
| 12 |
+
system = "You are a job description analyzer. Return only JSON in the format: {\"job_duties\": [\"duty 1\", \"duty 2\"], \"required_qualifications\": [\"qual 1\", \"qual 2\"], \"preferred_qualifications\": [\"pref 1\", \"pref 2\"]}"
|
| 13 |
+
return call_claude(prompt=prompt, system=system)
|
| 14 |
+
|
| 15 |
+
def _ksa_extraction_prompt(job_description):
|
| 16 |
+
return f"""Extract Key Selection Areas (KSAs) from this job description, maintaining exact original wording. Organize them into these categories:
|
| 17 |
+
|
| 18 |
+
Job Duties:
|
| 19 |
+
- List duties/responsibilities as stated in description
|
| 20 |
+
- Include any key activities or deliverables mentioned
|
| 21 |
+
|
| 22 |
+
Required Qualifications:
|
| 23 |
+
- List all explicitly stated required qualifications
|
| 24 |
+
- Include any specific metrics (e.g., years of experience)
|
| 25 |
+
- Include required technical skills, expertise, or knowledge areas
|
| 26 |
+
|
| 27 |
+
Preferred Qualifications:
|
| 28 |
+
- List only qualifications explicitly marked as "preferred," "desired," or similar
|
| 29 |
+
- Maintain any specific metrics mentioned
|
| 30 |
+
|
| 31 |
+
Do not list any implied requirements.
|
| 32 |
+
|
| 33 |
+
Format the response as JSON:
|
| 34 |
+
{{
|
| 35 |
+
"job_duties": ["duty 1", "duty 2", ...],
|
| 36 |
+
"required_qualifications": ["qual 1", "qual 2", ...],
|
| 37 |
+
"preferred_qualifications": ["pref 1", "pref 2", ...]
|
| 38 |
+
}}
|
| 39 |
+
|
| 40 |
+
Job Description:
|
| 41 |
+
{job_description}"""
|
| 42 |
+
|
| 43 |
+
def process_analysis(resume_data, job_description, ksas, freeform_context):
|
| 44 |
+
raw_analysis_output = _analysis(resume_data, job_description, ksas, freeform_context)
|
| 45 |
+
output = ["Analysis:"]
|
| 46 |
+
for a in raw_analysis_output['analysis']:
|
| 47 |
+
output.append("- " + a)
|
| 48 |
+
output.append("Questions:")
|
| 49 |
+
for q in raw_analysis_output['questions']:
|
| 50 |
+
output.append("- " + q)
|
| 51 |
+
output.append("Recommendation: " + raw_analysis_output['recommendation']['decision'])
|
| 52 |
+
output.append("Rationale: " + raw_analysis_output['recommendation']['rationale'])
|
| 53 |
+
return("\n".join(output))
|
| 54 |
+
|
| 55 |
+
def _analysis(resume_data, job_description, ksas, freeform_context):
|
| 56 |
+
prompt = _analysis_prompt(resume_data, job_description, ksas, freeform_context)
|
| 57 |
+
print(prompt[:1000])
|
| 58 |
+
print("===")
|
| 59 |
+
print(prompt[-1000:])
|
| 60 |
+
system = """You are an expert resume consultant analyzing candidate fit. Return only JSON in the format: {
|
| 61 |
+
"analysis": ["key insight 1", "key insight 2", ...],
|
| 62 |
+
"questions": ["question 1", "question 2", ...],
|
| 63 |
+
"recommendation": {
|
| 64 |
+
"decision": "Go/No Go",
|
| 65 |
+
"rationale": "Brief explanation"
|
| 66 |
+
}
|
| 67 |
+
}"""
|
| 68 |
+
|
| 69 |
+
return call_claude(prompt=prompt, system=system)
|
| 70 |
+
|
| 71 |
+
def _analysis_prompt(resume_data, job_description, ksas, freeform_context):
|
| 72 |
+
return f"""You are an expert resume consultant analyzing a candidate's background for a specific role.
|
| 73 |
+
|
| 74 |
+
Target Job Description:
|
| 75 |
+
{job_description}
|
| 76 |
+
|
| 77 |
+
Core Requirements (for reference):
|
| 78 |
+
{ksas}
|
| 79 |
+
|
| 80 |
+
Candidate's Resume Data:
|
| 81 |
+
{resume_data}
|
| 82 |
+
|
| 83 |
+
Additional Context and Background:
|
| 84 |
+
{freeform_context}
|
| 85 |
+
|
| 86 |
+
Task:
|
| 87 |
+
Review all materials and provide two things:
|
| 88 |
+
1. Key insights about how the candidate's experience aligns with the role
|
| 89 |
+
2. Clarifying questions that would help optimize the candidate's application with respect to the core requirements
|
| 90 |
+
3. A clear "Go/No Go" recommendation with brief justification
|
| 91 |
+
|
| 92 |
+
Requirements for Analysis:
|
| 93 |
+
- Identify strongest alignment points between experience and job requirements
|
| 94 |
+
- Note any potential gaps or areas needing clarification
|
| 95 |
+
- Consider both explicit and implicit job requirements
|
| 96 |
+
- Focus on relevant experience only
|
| 97 |
+
- Do not make assumptions about undocumented experience
|
| 98 |
+
|
| 99 |
+
Requirements for Questions:
|
| 100 |
+
- Ask for specific details that would strengthen the application
|
| 101 |
+
- Focus questions on areas where additional context could help highlight relevant experience
|
| 102 |
+
- Ask about any ambiguous experiences that might be valuable if clarified
|
| 103 |
+
- Only ask questions that would materially improve the application if answered
|
| 104 |
+
- Prioritize questions about experiences mentioned in the materials, not hypothetical experience
|
| 105 |
+
- Questions should be relevant to the core requirements
|
| 106 |
+
|
| 107 |
+
Requirements for Recommendation:
|
| 108 |
+
- Provide a clear "Go" or "No Go" recommendation
|
| 109 |
+
- Base recommendation on alignment between requirements and documented experience
|
| 110 |
+
- Consider both minimum and preferred qualifications
|
| 111 |
+
- Include 1-2 sentences explaining the recommendation
|
| 112 |
+
- Be direct and honest while remaining constructive
|
| 113 |
+
|
| 114 |
+
Important:
|
| 115 |
+
- Only reference information provided in the materials
|
| 116 |
+
- Questions should be specific and actionable
|
| 117 |
+
- Focus on details that would be appropriate to include in a resume or cover letter
|
| 118 |
+
- Consider the role's specific requirements when forming questions
|
| 119 |
+
"""
|
| 120 |
+
|
| 121 |
+
def process_core_content(resume_data, job_description, ksas, freeform_context):
|
| 122 |
+
experience_bullets = process_job_experience(resume_data, job_description, ksas, freeform_context)
|
| 123 |
+
technical_skills = process_technical_skills(resume_data, job_description, ksas)
|
| 124 |
+
return experience_bullets, technical_skills
|
| 125 |
+
|
| 126 |
+
def process_job_experience(resume_data, job_description, ksas, freeform_context):
|
| 127 |
+
output = []
|
| 128 |
+
# Process each job experience
|
| 129 |
+
for job in resume_data['experience']:
|
| 130 |
+
output.append(f"\nOptimizing bullets for: {job['title']}")
|
| 131 |
+
optimized = optimize_job_experience(job, job_description, ksas, freeform_context)
|
| 132 |
+
output.append("\nOptimized bullets:")
|
| 133 |
+
for bullet in optimized['bullets']:
|
| 134 |
+
output.append(f"• {bullet}")
|
| 135 |
+
|
| 136 |
+
return "\n".join(output)
|
| 137 |
+
|
| 138 |
+
def optimize_job_experience(job_experience, job_description, ksas, freeform_context):
|
| 139 |
+
prompt = _experience_optimization_prompt(job_experience, job_description, ksas, freeform_context)
|
| 140 |
+
system = "You are an expert resume writer. Return ONLY valid JSON with no explanation, formatting, or additional text. The response must exactly match this format: {\"bullets\": [\"bullet1\", \"bullet2\", ...]}"
|
| 141 |
+
return call_claude(prompt=prompt, system=system)
|
| 142 |
+
|
| 143 |
+
def _experience_optimization_prompt(job_experience, job_description, ksas, freeform_context):
|
| 144 |
+
return f"""Given this job experience and target job description, create optimized bullet points that best position the candidate for the role. Choose the number of bullets based on the relevance and importance of this experience to the target role.
|
| 145 |
+
|
| 146 |
+
Historical Job Title: {job_experience['title']}
|
| 147 |
+
|
| 148 |
+
Complete Job Description:
|
| 149 |
+
{job_description}
|
| 150 |
+
|
| 151 |
+
Core Requirements (use these as your primary reference for optimization):
|
| 152 |
+
{ksas}
|
| 153 |
+
|
| 154 |
+
Available bullet points and variations:
|
| 155 |
+
{_formatted_bullets(job_experience['bullets'])}
|
| 156 |
+
|
| 157 |
+
Additional Context (use to enhance understanding of experiences):
|
| 158 |
+
{freeform_context}
|
| 159 |
+
|
| 160 |
+
Important:
|
| 161 |
+
- Read the complete job description for full context
|
| 162 |
+
- When prioritizing content, focus primarily on the Core Requirements listed above
|
| 163 |
+
- Use the Additional Context to better understand experiences, but only generate bullets from information present in the bullet variations
|
| 164 |
+
- If there's ambiguity about an experience, refer to the Additional Context for clarification
|
| 165 |
+
- Use strong action verbs
|
| 166 |
+
- Quantify impact where possible
|
| 167 |
+
- Highlight relevant technical skills
|
| 168 |
+
- Focus on achievements over responsibilities
|
| 169 |
+
- Maintain truthfulness - only use information present in the variations
|
| 170 |
+
- The reader should not be able to tell AI wrote the bullet points
|
| 171 |
+
"""
|
| 172 |
+
|
| 173 |
+
def _formatted_bullets(bullets):
|
| 174 |
+
formatted = []
|
| 175 |
+
for bullet in bullets:
|
| 176 |
+
formatted.append(f"Headline: {bullet['headline']}")
|
| 177 |
+
formatted.append("Variations:")
|
| 178 |
+
for variation in bullet['variations']:
|
| 179 |
+
formatted.append(f"- {variation}")
|
| 180 |
+
formatted.append("") # Empty line between bullet groups
|
| 181 |
+
|
| 182 |
+
return "\n".join(formatted)
|
| 183 |
+
|
| 184 |
+
def process_technical_skills(resume_data, job_description, ksas):
|
| 185 |
+
output = []
|
| 186 |
+
raw_tech_skills = resume_data['technical_skills']
|
| 187 |
+
optimized_tech_skills = optimize_technical_skills(raw_tech_skills, job_description, ksas)
|
| 188 |
+
|
| 189 |
+
output.append("Technical Skills:\n\n")
|
| 190 |
+
for skill_cat in optimized_tech_skills['technical_skills']:
|
| 191 |
+
output.append(skill_cat['category'] + ": " + ', '.join(skill_cat['skills']))
|
| 192 |
+
|
| 193 |
+
return "\n".join(output)
|
| 194 |
+
|
| 195 |
+
def optimize_technical_skills(tech_skills, job_description, ksas):
|
| 196 |
+
prompt = _skills_prompt(tech_skills, job_description, ksas)
|
| 197 |
+
system = "You are an expert resume writer. Return ONLY valid JSON with no explanation or additional text. The response must exactly match this format: {\"technical_skills\": [{\"category\": \"Category Name\", \"skills\": [\"Skill 1\", \"Skill 2\"]}, ...]}"
|
| 198 |
+
return call_claude(prompt=prompt, system=system)
|
| 199 |
+
|
| 200 |
+
def _skills_prompt(all_skills, job_description, ksas):
|
| 201 |
+
return f"""You are an expert resume writer helping optimize a technical skills section for a specific role.
|
| 202 |
+
|
| 203 |
+
Target Job Description:
|
| 204 |
+
{job_description}
|
| 205 |
+
|
| 206 |
+
Core Requirements (use these as your primary reference for optimization):
|
| 207 |
+
{ksas}
|
| 208 |
+
|
| 209 |
+
Available skills and their current categories:
|
| 210 |
+
{_formatted_skills(all_skills)}
|
| 211 |
+
|
| 212 |
+
Task:
|
| 213 |
+
Create an optimized technical skills section that best positions the candidate for this role.
|
| 214 |
+
|
| 215 |
+
Requirements:
|
| 216 |
+
|
| 217 |
+
1. Read the complete job description for context
|
| 218 |
+
2. When selecting and ordering skills, prioritize those that directly match the Core Requirements
|
| 219 |
+
3. If there's ambiguity about skill importance, defer to the Core Requirements
|
| 220 |
+
4. Order categories from most to least relevant for this specific role
|
| 221 |
+
5. Within each category, order skills from most to least relevant
|
| 222 |
+
6. Create new categories or rename existing ones if it would better align with the job requirements
|
| 223 |
+
7. Include only relevant skills; omit those that don't add value for this role
|
| 224 |
+
8. Keep categories and skills lists concise and impactful
|
| 225 |
+
9. Use standard industry terminology for categories
|
| 226 |
+
|
| 227 |
+
Format the response as a JSON list:
|
| 228 |
+
{{"technical_skills": [
|
| 229 |
+
{{"category": "Category Name", "skills": ["Skill 1", "Skill 2", ...]}},
|
| 230 |
+
...
|
| 231 |
+
]}}
|
| 232 |
+
|
| 233 |
+
Important:
|
| 234 |
+
- Don't assume skill importance based on frequency of appearance
|
| 235 |
+
- Only include skills that are present in the input list
|
| 236 |
+
- Focus on skills that align with job requirements
|
| 237 |
+
- Ensure category names reflect current industry standards
|
| 238 |
+
"""
|
| 239 |
+
|
| 240 |
+
def _formatted_skills(tech_skills):
|
| 241 |
+
cat_str_out = [f"{cat['category']}: " + ', '.join(cat['skills']) for cat in tech_skills]
|
| 242 |
+
return("\n".join(cat_str_out))
|
| 243 |
+
|
| 244 |
+
def process_summary(job_description, ksas, experience_bullets, technical_skills, resume_context, summary_sample):
|
| 245 |
+
summary = _summary(job_description, ksas, experience_bullets, technical_skills, resume_context, summary_sample)
|
| 246 |
+
summary_str = 'Summary\n' + '\n'.join(summary['summary'])
|
| 247 |
+
return summary_str
|
| 248 |
+
|
| 249 |
+
def _summary(job_description, ksas, experience_bullets, technical_skills, resume_context, summary_sample):
|
| 250 |
+
prompt = _summary_prompt(job_description, ksas, experience_bullets, technical_skills, resume_context, summary_sample)
|
| 251 |
+
system = "You are an expert resume writer. Return only JSON in the format: {\"summary\": [\"bullet1\", \"bullet2\", ...]}"
|
| 252 |
+
return call_claude(prompt=prompt, system=system)
|
| 253 |
+
|
| 254 |
+
def _summary_prompt(job_description, ksas, experience_bullets, technical_skills, resume_context, summary_sample):
|
| 255 |
+
return f"""You are an expert resume writer creating a powerful summary section for a specific role.
|
| 256 |
+
|
| 257 |
+
Target Job Description:
|
| 258 |
+
{job_description}
|
| 259 |
+
|
| 260 |
+
Core Requirements (use as primary reference):
|
| 261 |
+
{ksas}
|
| 262 |
+
|
| 263 |
+
Candidate's Optimized Experience:
|
| 264 |
+
{experience_bullets}
|
| 265 |
+
|
| 266 |
+
Candidate's Optimized Technical Skills:
|
| 267 |
+
{technical_skills}
|
| 268 |
+
|
| 269 |
+
Additional Context:
|
| 270 |
+
{resume_context}
|
| 271 |
+
|
| 272 |
+
Writing Style Reference:
|
| 273 |
+
{summary_sample}
|
| 274 |
+
|
| 275 |
+
Task:
|
| 276 |
+
Create 5-10 impactful summary bullets that will immediately convince the hiring manager to carefully review this resume.
|
| 277 |
+
|
| 278 |
+
Requirements:
|
| 279 |
+
1. Focus heavily on matching the Core Requirements, especially required qualifications
|
| 280 |
+
2. Use only information provided in the experience bullets, technical skills, and resume context
|
| 281 |
+
3. Do not fabricate or embellish experience
|
| 282 |
+
4. Order bullets from most to least relevant to the role
|
| 283 |
+
5. Keep total length under 100 words
|
| 284 |
+
6. Match the style of the provided summary sample while prioritizing content alignment with job requirements
|
| 285 |
+
7. Write for quick scanning - each bullet should build confidence in the candidate's fit
|
| 286 |
+
8. If a job requirement isn't addressed, assume the candidate lacks that experience
|
| 287 |
+
|
| 288 |
+
Format:
|
| 289 |
+
Return 5-10 bullets in JSON format: {{"summary": ["bullet1", "bullet2", ...]}}
|
| 290 |
+
|
| 291 |
+
Important:
|
| 292 |
+
- Prioritize demonstrating fit for core job requirements
|
| 293 |
+
- Use strong, active language
|
| 294 |
+
- Be specific and quantifiable where possible
|
| 295 |
+
- Focus on achievements and capabilities
|
| 296 |
+
- Maintain truthfulness - only use provided information
|
| 297 |
+
- The reader should not be able to tell AI wrote the summary
|
| 298 |
+
"""
|
| 299 |
+
|
| 300 |
+
def _cover_letter_prompt(job_description, ksas, experience_bullets, technical_skills, resume_context, summary, cover_letter_sample):
|
| 301 |
+
return f"""You are writing a compelling cover letter for a specific role, matching the candidate's authentic writing style.
|
| 302 |
+
|
| 303 |
+
Target Job Description:
|
| 304 |
+
{job_description}
|
| 305 |
+
|
| 306 |
+
Core Requirements (for reference):
|
| 307 |
+
{ksas}
|
| 308 |
+
|
| 309 |
+
Candidate's Qualifications and Experience:
|
| 310 |
+
Summary:
|
| 311 |
+
{summary}
|
| 312 |
+
|
| 313 |
+
Experience Details:
|
| 314 |
+
{experience_bullets}
|
| 315 |
+
|
| 316 |
+
Technical Skills:
|
| 317 |
+
{technical_skills}
|
| 318 |
+
|
| 319 |
+
Additional Background:
|
| 320 |
+
{resume_context}
|
| 321 |
+
|
| 322 |
+
Writing Style Reference (match this tone and structure):
|
| 323 |
+
{cover_letter_sample}
|
| 324 |
+
|
| 325 |
+
Task:
|
| 326 |
+
Write a one-page cover letter that demonstrates the candidate is an excellent fit for this role.
|
| 327 |
+
|
| 328 |
+
Requirements:
|
| 329 |
+
1. Match the candidate's writing style exactly - natural, professional, and personally engaging
|
| 330 |
+
2. Focus on experiences that directly address the job's core requirements
|
| 331 |
+
3. Use only information provided in the resume materials.
|
| 332 |
+
4. Highlight achievements that differentiate the candidate
|
| 333 |
+
5. Keep to one page in length
|
| 334 |
+
6. Maintain first-person perspective throughout
|
| 335 |
+
|
| 336 |
+
Important:
|
| 337 |
+
- Do not fabricate or embellish any experiences
|
| 338 |
+
- If a requirement isn't addressed in the provided materials, do not mention it
|
| 339 |
+
- Focus on specific, concrete examples rather than generic statements
|
| 340 |
+
- Match the personal, authentic tone of the sample letter
|
| 341 |
+
- Match the general structure of the sample letter
|
| 342 |
+
- The reader should not be able to tell AI wrote the cover letter
|
| 343 |
+
|
| 344 |
+
Format the response as JSON: {{"cover_letter": "Dear Hiring Manager,\\n\\n[cover letter content]\\n\\nSincerely,\\nNathan"}}"""
|
| 345 |
+
|
| 346 |
+
def _cover_letter(job_description, ksas, experience_bullets, technical_skills, resume_context, summary, cover_letter_sample):
|
| 347 |
+
prompt = _cover_letter_prompt(job_description, ksas, experience_bullets, technical_skills, resume_context, summary, cover_letter_sample)
|
| 348 |
+
system = "You are an expert resume writer crafting cover letters. Return ONLY valid JSON with no explanation or additional text. The response must exactly match this format: {\"cover_letter\": \"Dear Hiring Manager,\\n\\n[letter content]\\n\\nSincerely,\\nNathan\"} Maintain natural writing style and first-person perspective."
|
| 349 |
+
return call_claude(prompt=prompt, system=system)['cover_letter']
|
| 350 |
+
|
| 351 |
+
def call_claude(prompt, system, api_key=None):
|
| 352 |
+
api_key = os.getenv("ANTHROPIC_API_KEY")
|
| 353 |
+
client = Anthropic(api_key=api_key)
|
| 354 |
+
response = client.messages.create(
|
| 355 |
+
model="claude-3-5-sonnet-20241022",
|
| 356 |
+
max_tokens=1500,
|
| 357 |
+
temperature=0,
|
| 358 |
+
system=system,
|
| 359 |
+
messages=[{
|
| 360 |
+
"role": "user",
|
| 361 |
+
"content": prompt
|
| 362 |
+
}]
|
| 363 |
+
)
|
| 364 |
+
try:
|
| 365 |
+
result = json.loads(response.content[0].text)
|
| 366 |
+
return result
|
| 367 |
+
except Exception as e:
|
| 368 |
+
print("Raw response:", response.content[0].text) # Debug line
|
| 369 |
+
raise ValueError(f"Failed to parse Claude's response: {e}")
|