html = re.sub(r'(<li>.*?</li>\n(?:\s*<li>.*?</li>\n)*)', r'<ul>\n\1</ul>\n', html, flags=re.DOTALL)
# Convert double line breaks to paragraphs
paragraphs = html.split('\n\n')
html_paragraphs = []
for para in paragraphs:
para = para.strip()
if para:
# Don't wrap headers or lists in <p> tags
if not (para.startswith('<') or para.startswith('- ')):
    para = f'<p>{para}</p>'
html_paragraphs.append(para)
html = '\n'.join(html_paragraphs)
# Convert remaining single line breaks to <br> tags within paragraphs
html = re.sub(r'(?<!>)\n(?!<)', '<br>', html)
# Clean up extra <br> tags around block elements
html = re.sub(r'<br>\s*(<h[1-6]>)', r'\1', html)
html = re.sub(r'(</h[1-6]>)\s*<br>', r'\1', html)
html = re.sub(r'<br>\s*(<ul>|<li>)', r'\1', html)
html = re.sub(r'(</ul>|</li>)\s*<br>', r'\1', html)
return html
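# Illustrative example (an assumption, since the heading/list handling earlier in this
# method is not shown here): for typical LLM output the helper is expected to behave
# roughly like this:
#
#   _markdown_to_html("## Quick Wins\n\n- Add title tags\n- Fix meta descriptions\n")
#   # -> '<h2>Quick Wins</h2>\n<ul>\n<li>Add title tags</li>\n<li>Fix meta descriptions</li>\n</ul>'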
def generate_html_report(self, url: str, technical_data: Dict[str, Any],
content_data: Dict[str, Any], competitor_data: List[Dict] = None,
keywords_data: Dict[str, Any] = None, backlinks_data: Dict[str, Any] = None,
llm_recommendations: Dict[str, Any] = None, include_charts: bool = True) -> str:
"""Generate complete HTML SEO report"""
# Generate charts
charts_html = ""
if include_charts:
charts_html = self._generate_charts(technical_data, content_data, competitor_data, keywords_data, backlinks_data)
# Generate executive summary with benchmarks
executive_summary = self._generate_executive_summary_with_badges(technical_data, content_data, keywords_data, backlinks_data)
# Generate technical SEO section
technical_section = self._generate_technical_section(technical_data)
# Generate content audit section
content_section = self._generate_content_section(content_data)
# Generate keywords section
keywords_section = self._generate_keywords_section(keywords_data) if keywords_data else ""
# Generate backlinks section
backlinks_section = self._generate_backlinks_section(backlinks_data) if backlinks_data else ""
# Generate LLM recommendations section
recommendations_section = self._generate_recommendations_section(llm_recommendations) if llm_recommendations else ""
# Generate competitor section
competitor_section = ""
if competitor_data:
competitor_section = self._generate_competitor_section(competitor_data, technical_data, content_data)
# Generate recommendations
recommendations = self._generate_recommendations(technical_data, content_data)
# Compile final report
report_html = self.report_template.format(
url=url,
generated_date=datetime.now().strftime("%B %d, %Y at %I:%M %p"),
charts=charts_html,
executive_summary=executive_summary,
technical_section=technical_section,
content_section=content_section,
keywords_section=keywords_section,
backlinks_section=backlinks_section,
competitor_section=competitor_section,
recommendations=recommendations,
llm_recommendations=recommendations_section
)
return report_html
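# Usage sketch (hedged: the class and variable names below are illustrative, not taken
# from this file):
#
#   generator = SEOReportGenerator()
#   html = generator.generate_html_report(
#       url="https://example.com",
#       technical_data=technical_results,
#       content_data=content_results,
#       include_charts=True,
#   )
#   with open("seo_report.html", "w", encoding="utf-8") as fh:
#       fh.write(html)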
def _generate_charts(self, technical_data: Dict[str, Any], content_data: Dict[str, Any],
competitor_data: List[Dict] = None, keywords_data: Dict[str, Any] = None,
backlinks_data: Dict[str, Any] = None) -> str:
"""Generate interactive charts using Plotly"""
charts_html = ""
# Performance Scores Chart
if not technical_data.get('error'):
mobile_scores = technical_data.get('mobile', {})
desktop_scores = technical_data.get('desktop', {})
performance_fig = go.Figure()
categories = ['Performance', 'SEO', 'Accessibility', 'Best Practices']
mobile_values = [
mobile_scores.get('performance_score', 0),
mobile_scores.get('seo_score', 0),
mobile_scores.get('accessibility_score', 0),
mobile_scores.get('best_practices_score', 0)
]
desktop_values = [
desktop_scores.get('performance_score', 0),
desktop_scores.get('seo_score', 0),
desktop_scores.get('accessibility_score', 0),
desktop_scores.get('best_practices_score', 0)
]
performance_fig.add_trace(go.Bar(
name='Mobile',
x=categories,
y=mobile_values,
marker_color='#FF6B6B'
))
performance_fig.add_trace(go.Bar(
name='Desktop',
x=categories,
y=desktop_values,
marker_color='#4ECDC4'
))
performance_fig.update_layout(
title='PageSpeed Insights Scores',
xaxis_title='Categories',
yaxis_title='Score (0-100)',
barmode='group',
height=400,
showlegend=True
)
charts_html += f'<div class="chart-container">{plot(performance_fig, output_type="div", include_plotlyjs=False)}</div>'
# Core Web Vitals Chart
if not technical_data.get('error'):
cwv_data = technical_data.get('core_web_vitals', {})
mobile_cwv = cwv_data.get('mobile', {})
desktop_cwv = cwv_data.get('desktop', {})
cwv_fig = go.Figure()
metrics = ['LCP (s)', 'CLS', 'INP (ms)', 'FCP (s)']
mobile_cwv_values = [
mobile_cwv.get('lcp', 0),
mobile_cwv.get('cls', 0),
mobile_cwv.get('inp', 0),
mobile_cwv.get('fcp', 0)
]
desktop_cwv_values = [
desktop_cwv.get('lcp', 0),
desktop_cwv.get('cls', 0),
desktop_cwv.get('inp', 0),
desktop_cwv.get('fcp', 0)
]
cwv_fig.add_trace(go.Scatter(
name='Mobile',
x=metrics,
y=mobile_cwv_values,
mode='lines+markers',
line=dict(color='#FF6B6B', width=3),
marker=dict(size=8)
))
cwv_fig.add_trace(go.Scatter(
name='Desktop',
x=metrics,
y=desktop_cwv_values,
mode='lines+markers',
line=dict(color='#4ECDC4', width=3),
marker=dict(size=8)
))
cwv_fig.update_layout(
title='Core Web Vitals Performance',
xaxis_title='Metrics',
yaxis_title='Values',
height=400,
showlegend=True
)
charts_html += f'<div class="chart-container">{plot(cwv_fig, output_type="div", include_plotlyjs=False)}</div>'
# Metadata Completeness Chart
if not content_data.get('error'):
metadata = content_data.get('metadata_completeness', {})
completeness_fig = go.Figure(data=[go.Pie(
labels=['Title Tags', 'Meta Descriptions', 'H1 Tags'],
values=[
metadata.get('title_coverage', 0),
metadata.get('description_coverage', 0),
metadata.get('h1_coverage', 0)
],
hole=0.4,
marker_colors=['#FF6B6B', '#4ECDC4', '#45B7D1']
)])
completeness_fig.update_layout(
title='Metadata Completeness (%)',
height=400,
showlegend=True
)
charts_html += f'<div class="chart-container">{plot(completeness_fig, output_type="div", include_plotlyjs=False)}</div>'
# Content Freshness Chart
if not content_data.get('error'):
freshness = content_data.get('content_freshness', {})
freshness_fig = go.Figure(data=[go.Pie(
labels=['Fresh (<6 months)', 'Moderate (6-18 months)', 'Stale (>18 months)', 'Unknown Date'],
values=[
freshness.get('fresh_content', {}).get('count', 0),
freshness.get('moderate_content', {}).get('count', 0),
freshness.get('stale_content', {}).get('count', 0),
freshness.get('unknown_date', {}).get('count', 0)
],
marker_colors=['#2ECC71', '#F39C12', '#E74C3C', '#95A5A6']
)])
freshness_fig.update_layout(
title='Content Freshness Distribution',
height=400,
showlegend=True
)
charts_html += f'<div class="chart-container">{plot(freshness_fig, output_type="div", include_plotlyjs=False)}</div>'
return charts_html
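# Note: every chart above is embedded with include_plotlyjs=False, so the returned
# <div> fragments render only if the surrounding page loads Plotly.js itself (the
# Plotly.js <script> tag expected in the report template). Minimal standalone check:
#
#   from plotly.offline import plot
#   import plotly.graph_objects as go
#   fragment = plot(go.Figure(go.Bar(x=["a"], y=[1])), output_type="div", include_plotlyjs=False)
#   # 'fragment' is an HTML snippet that expects window.Plotly to already be defined.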
def _generate_executive_summary(self, technical_data: Dict[str, Any], content_data: Dict[str, Any],
keywords_data: Dict[str, Any] = None, backlinks_data: Dict[str, Any] = None,
llm_recommendations: Dict[str, Any] = None) -> str:
"""Generate executive summary section"""
# Calculate overall health score
mobile_perf = technical_data.get('mobile', {}).get('performance_score', 0)
desktop_perf = technical_data.get('desktop', {}).get('performance_score', 0)
avg_performance = (mobile_perf + desktop_perf) / 2
metadata_avg = 0
if not content_data.get('error'):
metadata = content_data.get('metadata_completeness', {})
metadata_avg = (
metadata.get('title_coverage', 0) +
metadata.get('description_coverage', 0) +
metadata.get('h1_coverage', 0)
) / 3
overall_score = (avg_performance + metadata_avg) / 2
# Health status
if overall_score >= 80:
health_status = "Excellent"
health_color = "#2ECC71"
elif overall_score >= 60:
health_status = "Good"
health_color = "#F39C12"
elif overall_score >= 40:
health_status = "Fair"
health_color = "#FF6B6B"
else:
health_status = "Poor"
health_color = "#E74C3C"
# Quick wins
quick_wins = []
if not content_data.get('error'):
metadata = content_data.get('metadata_completeness', {})
if metadata.get('title_coverage', 0) < 90:
quick_wins.append(f"Complete missing title tags ({100 - metadata.get('title_coverage', 0):.1f}% of pages missing)")
if metadata.get('description_coverage', 0) < 90:
quick_wins.append(f"Add missing meta descriptions ({100 - metadata.get('description_coverage', 0):.1f}% of pages missing)")
if metadata.get('h1_coverage', 0) < 90:
quick_wins.append(f"Add missing H1 tags ({100 - metadata.get('h1_coverage', 0):.1f}% of pages missing)")
if mobile_perf < 70:
quick_wins.append(f"Improve mobile performance score (currently {mobile_perf:.1f}/100)")
quick_wins_html = "".join([f"<li>{win}</li>" for win in quick_wins[:5]])
return f"""
<div class="summary-grid">
    <div class="summary-card">
        <h3>Overall SEO Health</h3>
        <div class="score-value" style="color: {health_color};">{overall_score:.0f} <span class="score-max">/ 100</span></div>
        <div class="score-status" style="color: {health_color};">{health_status}</div>
    </div>
    <div class="summary-card">
        <h3>Performance Score</h3>
        <p>Mobile: {mobile_perf:.1f}/100</p>
        <p>Desktop: {desktop_perf:.1f}/100</p>
    </div>
    <div class="summary-card">
        <h3>Content Analysis</h3>
        <p>Pages Analyzed: {content_data.get('pages_analyzed', 0)}</p>
        <p>Metadata Completeness: {metadata_avg:.1f}%</p>
    </div>
</div>
<h3>🎯 Quick Wins</h3>
<ul>
{quick_wins_html}
{'' if quick_wins else '<li>Great job! No immediate quick wins identified.</li>'}
</ul>
"""
def _generate_executive_summary_with_badges(self, technical_data: Dict[str, Any],
content_data: Dict[str, Any],
keywords_data: Dict[str, Any] = None,
backlinks_data: Dict[str, Any] = None) -> str:
"""Generate executive summary with benchmark badges"""
# Extract metrics for badges
mobile_score = technical_data.get('mobile', {}).get('performance_score', 0)
cwv = technical_data.get('core_web_vitals', {}).get('mobile', {})
lcp_value = cwv.get('lcp', 0)
cls_value = cwv.get('cls', 0)
meta_complete_pct = content_data.get('meta_complete_pct', 0)
avg_words = content_data.get('avg_words', 0)
keywords_top10_pct = 0
if keywords_data and not keywords_data.get('placeholder'):
dist = keywords_data.get('position_distribution', {})
total = keywords_data.get('total_keywords', 0)
if total > 0:
keywords_top10_pct = (dist.get('top_10', 0) / total) * 100
domain_rating = backlinks_data.get('domain_rating', 0) if backlinks_data else 0
referring_domains = backlinks_data.get('total_ref_domains', 0) if backlinks_data else 0
# Generate badges
badges_html = self._generate_benchmark_badges(
mobile_score, lcp_value, cls_value, meta_complete_pct,
avg_words, keywords_top10_pct, domain_rating, referring_domains
)
# Overall health score
overall_score = (mobile_score + meta_complete_pct) / 2
if overall_score >= 80:
health_status = "Excellent"
health_color = "#2ECC71"
elif overall_score >= 60:
health_status = "Good"
health_color = "#F39C12"
elif overall_score >= 40:
health_status = "Fair"
health_color = "#FF6B6B"
else:
health_status = "Poor"
health_color = "#E74C3C"
return f"""
<div class="summary-card">
    <h3>Overall SEO Health</h3>
    <div class="score-value" style="color: {health_color};">{overall_score:.0f} <span class="score-max">/ 100</span></div>
    <div class="score-status" style="color: {health_color};">{health_status}</div>
</div>
<h3>📊 Benchmark Performance</h3>
{badges_html}
"""
def _generate_benchmark_badges(self, mobile_score, lcp_value, cls_value, meta_complete_pct,
avg_words, keywords_top10_pct, domain_rating, referring_domains) -> str:
"""Generate benchmark badges for executive summary"""
badges = [
badge(f"{mobile_score}", mobile_score >= BENCHMARKS['mobile_score_min']),
badge(f"{lcp_value:.1f}s", lcp_value <= BENCHMARKS['lcp_max'] if lcp_value > 0 else False),
badge(f"{cls_value:.3f}", cls_value <= BENCHMARKS['cls_max'] if cls_value >= 0 else False),
badge(f"{meta_complete_pct:.1f}%", meta_complete_pct >= BENCHMARKS['meta_complete_min']),
badge(f"{avg_words} words", BENCHMARKS['avg_words_min'] <= avg_words <= BENCHMARKS['avg_words_max'] if avg_words > 0 else False),
badge(f"{keywords_top10_pct:.1f}%", keywords_top10_pct >= BENCHMARKS['keywords_top10_min']),
badge(f"DR {domain_rating}", domain_rating >= BENCHMARKS['domain_rating_min']),
badge(f"{referring_domains} domains", referring_domains >= BENCHMARKS['referring_domains_min'])
]
badges_html = '<div class="badges-grid">'
labels = [
"Mobile Performance", "LCP", "CLS", "Meta Completeness",
"Content Length", "Top 10 Keywords", "Domain Rating", "Referring Domains"
]
targets = [
f"> {BENCHMARKS['mobile_score_min']}",
f"< {BENCHMARKS['lcp_max']}s",
f"< {BENCHMARKS['cls_max']}",
f"> {BENCHMARKS['meta_complete_min']}%",
f"{BENCHMARKS['avg_words_min']}-{BENCHMARKS['avg_words_max']}",
f"> {BENCHMARKS['keywords_top10_min']}%",
f"> {BENCHMARKS['domain_rating_min']}",
f"> {BENCHMARKS['referring_domains_min']}"
]
for i, (label, target, badge_data) in enumerate(zip(labels, targets, badges)):
status_class = 'pass' if badge_data['status'] == 'pass' else 'fail'
icon = '✓' if badge_data['status'] == 'pass' else '✗'
badges_html += f'''
<div class="benchmark-badge {status_class}">
    <span class="badge-icon">{icon}</span>
    <span class="badge-value">{badge_data['value']}</span>
    <span class="badge-label">{label}</span>
    <span class="badge-target">Target: {target}</span>
</div>
'''
badges_html += '</div>'
return badges_html
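# badge() and BENCHMARKS are defined elsewhere in the project; a minimal sketch of the
# shapes this method assumes (illustrative values, not the real thresholds):
#
#   BENCHMARKS = {
#       "mobile_score_min": 70, "lcp_max": 2.5, "cls_max": 0.1, "meta_complete_min": 90,
#       "avg_words_min": 800, "avg_words_max": 1200, "keywords_top10_min": 20,
#       "domain_rating_min": 30, "referring_domains_min": 50,
#   }
#
#   def badge(value, passed):
#       return {"value": value, "status": "pass" if passed else "fail"}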
def _generate_technical_section(self, technical_data: Dict[str, Any]) -> str:
"""Generate technical SEO section"""
if technical_data.get('error'):
return f"""
<div class="error-section">
    <h3>⚠️ Technical SEO Analysis</h3>
    <p>Unable to complete technical analysis: {technical_data.get('error')}</p>
</div>
"""
mobile = technical_data.get('mobile', {})
desktop = technical_data.get('desktop', {})
cwv = technical_data.get('core_web_vitals', {})
opportunities = technical_data.get('opportunities', {}).get('opportunities', [])
# Core Web Vitals analysis
mobile_cwv = cwv.get('mobile', {})
cwv_analysis = []
lcp = mobile_cwv.get('lcp', 0)
if lcp > 2.5:
cwv_analysis.append(f"⚠️ LCP ({lcp:.2f}s) - Should be under 2.5s")
else:
cwv_analysis.append(f"✅ LCP ({lcp:.2f}s) - Good")
cls = mobile_cwv.get('cls', 0)
if cls > 0.1:
cwv_analysis.append(f"⚠️ CLS ({cls:.3f}) - Should be under 0.1")
else:
cwv_analysis.append(f"✅ CLS ({cls:.3f}) - Good")
# Opportunities list
opportunities_html = ""
for opp in opportunities[:5]:
opportunities_html += f"""
<div class="opportunity">
    <h4>{opp.get('title', 'Optimization Opportunity')}</h4>
    <p>{opp.get('description', '')}</p>
    <p class="savings">Potential savings: {opp.get('potential_savings', 0):.0f}ms</p>
</div>
"""
return f"""
<div class="metrics-grid">
    <div class="metric-card">
        <h4>Mobile Performance</h4>
        <div class="metric-value">{mobile.get('performance_score', 0):.1f}/100</div>
    </div>
    <div class="metric-card">
        <h4>Desktop Performance</h4>
        <div class="metric-value">{desktop.get('performance_score', 0):.1f}/100</div>
    </div>
    <div class="metric-card">
        <h4>SEO Score</h4>
        <div class="metric-value">{mobile.get('seo_score', 0):.1f}/100</div>
    </div>
    <div class="metric-card">
        <h4>Accessibility</h4>
        <div class="metric-value">{mobile.get('accessibility_score', 0):.1f}/100</div>
    </div>
</div>
<h3>Core Web Vitals Analysis</h3>
<ul>
{"".join([f"<li>{analysis}</li>" for analysis in cwv_analysis])}
</ul>
<h3>🔧 Optimization Opportunities</h3>
{opportunities_html if opportunities_html else '<p>No major optimization opportunities identified.</p>'}
"""
def _generate_content_section(self, content_data: Dict[str, Any]) -> str:
"""Generate content audit section"""
if content_data.get('error'):
return f"""
<div class="error-section">
    <h3>⚠️ Content Audit</h3>
    <p>Unable to complete content analysis: {content_data.get('error')}</p>
</div>
"""
metadata = content_data.get('metadata_completeness', {})
content_metrics = content_data.get('content_metrics', {})
freshness = content_data.get('content_freshness', {})
return f"""
<div class="metrics-grid">
    <div class="metric-card">
        <h4>Pages Discovered</h4>
        <div class="metric-value">{content_data.get('total_pages_discovered', 0)}</div>
    </div>
    <div class="metric-card">
        <h4>Pages Analyzed</h4>
        <div class="metric-value">{content_data.get('pages_analyzed', 0)}</div>
    </div>
    <div class="metric-card">
        <h4>Avg. Word Count</h4>
        <div class="metric-value">{content_metrics.get('avg_word_count', 0):.0f}</div>
    </div>
    <div class="metric-card">
        <h4>CTA Coverage</h4>
        <div class="metric-value">{content_metrics.get('cta_coverage', 0):.1f}%</div>
    </div>
</div>
<h3>📊 Content Quality Metrics</h3>
<ul>
    <li><strong>Average Word Count:</strong> {content_metrics.get('avg_word_count', 0):.0f} words <em>(Recommended: 800-1200)</em></li>
    <li><strong>Call-to-Action Coverage:</strong> {content_metrics.get('cta_coverage', 0):.1f}% of pages <em>(Target: 80%+)</em></li>
</ul>
<h3>🗓️ Content Freshness</h3>
<ul>
    <li><strong>Fresh Content (&lt;6 months):</strong> {freshness.get('fresh_content', {}).get('percentage', 0):.1f}%</li>
    <li><strong>Moderate Age (6-18 months):</strong> {freshness.get('moderate_content', {}).get('percentage', 0):.1f}%</li>
    <li><strong>Stale Content (&gt;18 months):</strong> {freshness.get('stale_content', {}).get('percentage', 0):.1f}%</li>
</ul>
"""
def _generate_competitor_section(self, competitor_data: List[Dict],
primary_technical: Dict[str, Any],
primary_content: Dict[str, Any]) -> str:
"""Generate competitor comparison section"""
if not competitor_data:
return ""
comparison_html = """
<h3>🏆 Competitor Benchmarking</h3>
<table class="comparison-table">
<tr>
    <th>Domain</th>
    <th>Mobile Perf.</th>
    <th>Desktop Perf.</th>
    <th>SEO Score</th>
    <th>Content Pages</th>
</tr>
"""
# Add primary site
primary_mobile = primary_technical.get('mobile', {}).get('performance_score', 0)
primary_desktop = primary_technical.get('desktop', {}).get('performance_score', 0)
primary_seo = primary_technical.get('mobile', {}).get('seo_score', 0)
primary_pages = primary_content.get('pages_analyzed', 0)
comparison_html += f"""
<tr>
    <td><strong>Your Site</strong></td>
    <td>{primary_mobile:.1f}</td>
    <td>{primary_desktop:.1f}</td>
    <td>{primary_seo:.1f}</td>
    <td>{primary_pages}</td>
</tr>
"""
# Add competitors
for comp in competitor_data:
comp_technical = comp.get('technical', {})
comp_content = comp.get('content', {})
comp_mobile = comp_technical.get('mobile', {}).get('performance_score', 0)
comp_desktop = comp_technical.get('desktop', {}).get('performance_score', 0)
comp_seo = comp_technical.get('mobile', {}).get('seo_score', 0)
comp_pages = comp_content.get('pages_analyzed', 0)
domain = comp.get('url', '').replace('https://', '').replace('http://', '')
comparison_html += f"""
<tr>
    <td>{domain}</td>
    <td>{comp_mobile:.1f}</td>
    <td>{comp_desktop:.1f}</td>
    <td>{comp_seo:.1f}</td>
    <td>{comp_pages}</td>
</tr>
"""
comparison_html += "</table>"
return comparison_html
def _generate_recommendations(self, technical_data: Dict[str, Any], content_data: Dict[str, Any]) -> str:
"""Generate prioritized recommendations"""
recommendations = []
# Technical recommendations
if not technical_data.get('error'):
mobile = technical_data.get('mobile', {})
if mobile.get('performance_score', 0) < 70:
recommendations.append({
'priority': 'High',
'category': 'Technical SEO',
'title': 'Improve Mobile Performance',
'description': f'Mobile performance score is {mobile.get("performance_score", 0):.1f}/100. Focus on Core Web Vitals optimization.',
'timeline': '2-4 weeks'
})
# Content recommendations
if not content_data.get('error'):
metadata = content_data.get('metadata_completeness', {})
if metadata.get('title_coverage', 0) < 90:
recommendations.append({
'priority': 'High',
'category': 'Content',
'title': 'Complete Missing Title Tags',
'description': f'{100 - metadata.get("title_coverage", 0):.1f}% of pages are missing title tags. This directly impacts search visibility.',
'timeline': '1-2 weeks'
})
if metadata.get('description_coverage', 0) < 90:
recommendations.append({
'priority': 'Medium',
'category': 'Content',
'title': 'Add Missing Meta Descriptions',
'description': f'{100 - metadata.get("description_coverage", 0):.1f}% of pages are missing meta descriptions. Improve click-through rates from search results.',
'timeline': '2-3 weeks'
})
content_metrics = content_data.get('content_metrics', {})
if content_metrics.get('avg_word_count', 0) < 800:
recommendations.append({
'priority': 'Medium',
'category': 'Content',
'title': 'Increase Content Depth',
'description': f'Average word count is {content_metrics.get("avg_word_count", 0):.0f} words. Aim for 800-1200 words per page for better rankings.',
'timeline': '4-6 weeks'
})
# Sort by priority
priority_order = {'High': 0, 'Medium': 1, 'Low': 2}
recommendations.sort(key=lambda x: priority_order.get(x['priority'], 2))
recommendations_html = ""
for i, rec in enumerate(recommendations[:8], 1):
priority_color = {
'High': '#E74C3C',
'Medium': '#F39C12',
'Low': '#2ECC71'
}.get(rec['priority'], '#95A5A6')
recommendations_html += f"""
<div class="recommendation" style="border-left: 5px solid {priority_color};">
    <h4>{rec['title']}</h4>
    <p>{rec['description']}</p>
    <p class="timeline"><strong>Timeline:</strong> {rec['timeline']}</p>
</div>
"""
return f"""
<h3>🎯 Prioritized Recommendations</h3>
{recommendations_html if recommendations_html else '<p>Great job! No immediate recommendations identified.</p>'}
"""
def _generate_keywords_section(self, keywords_data: Dict[str, Any]) -> str:
"""Generate keywords analysis section"""
if keywords_data.get('placeholder'):
return f"""
<h3>🔍 Keyword Rankings</h3>
<p>No keyword data available.</p>
<p>{keywords_data.get('message', 'Connect Google Search Console or SERP API to unlock keyword insights.')}</p>
"""
total = keywords_data.get('total_keywords', 0)
pos_dist = keywords_data.get('position_distribution', {})
best_keywords = keywords_data.get('best_keywords', [])
opportunity_keywords = keywords_data.get('opportunity_keywords', [])
worst_keywords = keywords_data.get('worst_keywords', {})
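# Expected shape of keywords_data (a hedged sketch inferred from the lookups above;
# the keys come from this method, the example values are invented):
#
#   {
#       "total_keywords": 120,
#       "data_source": "Google Search Console",
#       "position_distribution": {"top_3": 8, "top_10": 25, "top_50": 70, "beyond_50": 50},
#       "best_keywords": [{"keyword": "seo audit", "position": 4, "clicks": 310, "impressions": 9100}],
#       "opportunity_keywords": [{"keyword": "seo checklist", "position": 12, "impressions": 4000, "ctr": 1.2}],
#       "worst_keywords": {"by_ctr": [...], "by_position": [...]},
#   }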
# Create position distribution chart
pos_chart = ""
if pos_dist:
import plotly.graph_objects as go
from plotly.offline import plot
labels = ['Top 3', 'Top 10', 'Top 50', 'Beyond 50']
values = [
pos_dist.get('top_3', 0),
pos_dist.get('top_10', 0) - pos_dist.get('top_3', 0),
pos_dist.get('top_50', 0) - pos_dist.get('top_10', 0),
pos_dist.get('beyond_50', 0)
]
fig = go.Figure(data=[go.Pie(labels=labels, values=values, hole=0.4)])
fig.update_layout(title="Keyword Position Distribution", height=400)
pos_chart = plot(fig, include_plotlyjs=False, output_type='div')
best_keywords_html = ""
if best_keywords:
best_keywords_html = "<h4>🏆 Top Performing Keywords</h4><table><tr><th>Keyword</th><th>Position</th><th>Clicks</th><th>Impressions</th></tr>"
for kw in best_keywords[:10]:
    best_keywords_html += f"""
    <tr>
        <td>{kw.get('keyword', '')}</td>
        <td>{kw.get('position', 0)}</td>
        <td>{kw.get('clicks', 0)}</td>
        <td>{kw.get('impressions', 0)}</td>
    </tr>
    """
best_keywords_html += "</table>"
opportunity_html = ""
if opportunity_keywords:
opportunity_html = "<h4>🚀 Opportunity Keywords</h4><table><tr><th>Keyword</th><th>Position</th><th>Impressions</th><th>CTR</th></tr>"
for kw in opportunity_keywords[:10]:
    opportunity_html += f"""
    <tr>
        <td>{kw.get('keyword', '')}</td>
        <td>{kw.get('position', 0)}</td>
        <td>{kw.get('impressions', 0)}</td>
        <td>{kw.get('ctr', 0)}%</td>
    </tr>
    """
opportunity_html += "</table>"
# Worst performing keywords
worst_keywords_html = ""
if worst_keywords.get('by_ctr') or worst_keywords.get('by_position'):
worst_keywords_html = "<h4>⚠️ Worst Performing Keywords</h4>"
if worst_keywords.get('by_ctr'):
    worst_keywords_html += "<h5>By CTR (Low Click-Through Rate)</h5>"
    worst_keywords_html += "<table><tr><th>Keyword</th><th>Position</th><th>Impressions</th><th>CTR</th></tr>"
    for kw in worst_keywords['by_ctr'][:10]:
        worst_keywords_html += f"""
        <tr>
            <td>{kw.get('keyword', '')}</td>
            <td>{kw.get('rank', 0)}</td>
            <td>{kw.get('impressions', 0)}</td>
            <td>{kw.get('estimated_ctr', 0):.2f}%</td>
        </tr>
        """
    worst_keywords_html += "</table>"
if worst_keywords.get('by_position'):
    worst_keywords_html += "<h5>By Position (Poor Rankings)</h5>"
    worst_keywords_html += "<table><tr><th>Keyword</th><th>Position</th><th>Impressions</th></tr>"
    for kw in worst_keywords['by_position'][:10]:
        worst_keywords_html += f"""
        <tr>
            <td>{kw.get('keyword', '')}</td>
            <td>{kw.get('rank', 0)}</td>
            <td>{kw.get('impressions', 0)}</td>
        </tr>
        """
    worst_keywords_html += "</table>"
return f"""
<h3>🔍 Keyword Rankings Analysis</h3>
<div class="metrics-grid">
    <div class="metric-card">
        <div class="metric-value">{pos_dist.get('top_10', 0)}</div>
        <div class="metric-label">Top 10 Rankings</div>
    </div>
    <div class="metric-card">
        <div class="metric-value">{len(opportunity_keywords)}</div>
        <div class="metric-label">Opportunities</div>
    </div>
    <div class="metric-card">
        <div class="metric-value">{keywords_data.get('data_source', 'Unknown')}</div>
        <div class="metric-label">Data Source</div>
    </div>
</div>
{pos_chart}
{best_keywords_html}
{worst_keywords_html}
{opportunity_html}
"""
def _generate_backlinks_section(self, backlinks_data: Dict[str, Any]) -> str:
"""Generate backlinks analysis section"""
if backlinks_data.get('placeholder'):
return f"""
<h3>🔗 Backlink Profile</h3>
<p>No backlink data available.</p>
<p>{backlinks_data.get('message', 'Add RapidAPI key to unlock comprehensive backlink insights.')}</p>
"""
total_backlinks = backlinks_data.get('total_backlinks', 0)
total_ref_domains = backlinks_data.get('total_ref_domains', 0)
domain_rating = backlinks_data.get('domain_rating', 0)
monthly_changes = backlinks_data.get('monthly_changes', {})
referring_domains = backlinks_data.get('referring_domains', [])
anchor_distribution = backlinks_data.get('anchor_distribution', [])
new_backlinks = backlinks_data.get('new_backlinks_30d', 0)
lost_backlinks = backlinks_data.get('lost_backlinks_30d')
data_source = backlinks_data.get('data_source', 'Unknown')
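# Expected shape of backlinks_data (hedged sketch; the keys mirror the lookups above,
# the example values are invented):
#
#   {
#       "total_backlinks": 5400, "total_ref_domains": 320, "domain_rating": 41,
#       "new_backlinks_30d": 25, "lost_backlinks_30d": None,   # None is rendered as "N/A (future work)"
#       "data_source": "RapidAPI", "monthly_changes": {},
#       "referring_domains": [{"domain": "example.org", "domain_rating": 55, "backlinks": 12, "first_seen": "2024-01-15"}],
#       "anchor_distribution": [{"anchor_text": "brand name", "backlinks": 140}],
#   }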
# Create anchor text distribution chart
anchor_chart = ""
if anchor_distribution:
import plotly.graph_objects as go
from plotly.offline import plot
anchors = [a.get('anchor_text', '')[:30] for a in anchor_distribution[:10]]
counts = [a.get('backlinks', 0) for a in anchor_distribution[:10]]
fig = go.Figure(data=[go.Bar(x=anchors, y=counts)])
fig.update_layout(title="Top Anchor Text Distribution", height=400, xaxis={'tickangle': 45})
anchor_chart = plot(fig, include_plotlyjs=False, output_type='div')
ref_domains_html = ""
if referring_domains:
ref_domains_html = "<h4>🏢 Top Referring Domains</h4><table><tr><th>Domain</th><th>Domain Rating</th><th>Backlinks</th><th>First Seen</th></tr>"
for rd in referring_domains[:10]:
    ref_domains_html += f"""
    <tr>
        <td>{rd.get('domain', '')}</td>
        <td>{rd.get('domain_rating', 0)}</td>
        <td>{rd.get('backlinks', 0)}</td>
        <td>{rd.get('first_seen', 'N/A')}</td>
    </tr>
    """
ref_domains_html += "</table>"
lost_display = "N/A (future work)" if lost_backlinks is None else str(lost_backlinks)
return f"""
<h3>🔗 Backlink Profile Analysis</h3>
<p class="data-source">Source: {data_source}</p>
<div class="metrics-grid">
    <div class="metric-card">
        <div class="metric-value">{total_backlinks:,}</div>
        <div class="metric-label">Total Backlinks</div>
    </div>
    <div class="metric-card">
        <div class="metric-value">{total_ref_domains:,}</div>
        <div class="metric-label">Referring Domains</div>
    </div>
    <div class="metric-card">
        <div class="metric-value">{domain_rating}</div>
        <div class="metric-label">Domain Rating</div>
    </div>
    <div class="metric-card">
        <div class="metric-value">{new_backlinks}</div>
        <div class="metric-label">New Links (30d)</div>
    </div>
    <div class="metric-card">
        <div class="metric-value">{lost_display}</div>
        <div class="metric-label">Lost Links (30d)</div>
    </div>
</div>
{anchor_chart}
{ref_domains_html}
"""
def _generate_recommendations_section(self, llm_recommendations: Dict[str, Any]) -> str:
"""Generate LLM-powered recommendations section with markdown rendering"""
if not llm_recommendations:
return ""
recommendations_markdown = llm_recommendations.get('recommendations_markdown', '')
executive_insights = llm_recommendations.get('executive_insights', [])
priority_actions = llm_recommendations.get('priority_actions', [])
# Skip executive insights and priority actions - show only markdown
insights_html = ""
priority_html = ""
# Convert markdown recommendations to HTML
recommendations_html = ""
if recommendations_markdown:
recommendations_html = f"""
<h4>🤖 AI-Generated Recommendations</h4>
{self._markdown_to_html(recommendations_markdown)}
"""
return f"""
<h3>🧠 Smart Recommendations</h3>
<p class="data-source">Generated by {llm_recommendations.get('data_source', 'AI Analysis')}</p>
{insights_html}
{priority_html}
{recommendations_html}
"""
def _get_report_template(self) -> str:
"""Get the HTML template for the report"""
# Literal CSS braces are doubled ({{ }}) so str.format leaves them intact; Plotly.js is
# loaded from the CDN because charts are embedded with include_plotlyjs=False.
return """<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="utf-8">
    <title>SEO Report - {url}</title>
    <script src="https://cdn.plot.ly/plotly-latest.min.js"></script>
    <style>
        body {{ font-family: Arial, sans-serif; max-width: 1100px; margin: 2rem auto; color: #333; }}
        table {{ border-collapse: collapse; width: 100%; }}
        th, td {{ border: 1px solid #ddd; padding: 8px; text-align: left; }}
    </style>
</head>
<body>
    <h1>SEO Report - {url}</h1>
    <p>Generated on {generated_date}</p>
    <h2>📊 Executive Summary</h2>
    {executive_summary}
    <h2>📈 Performance Charts</h2>
    {charts}
    <h2>⚡ Technical SEO</h2>
    {technical_section}
    <h2>📝 Content Audit</h2>
    {content_section}
    <h2>🔍 Keywords Analysis</h2>
    {keywords_section}
    <h2>🔗 Backlinks Profile</h2>
    {backlinks_section}
    {competitor_section}
    {recommendations}
    {llm_recommendations}
</body>
</html>
"""