htg_product_scraper2_merged.py
import asyncio
import json
import os
import random
import re

from playwright.async_api import async_playwright
# ── NEW IMPORT FOR TEMPLATE RENDERING ──
from jinja2 import Environment, FileSystemLoader, select_autoescape
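
# Third-party dependencies (assuming a standard setup):
#   pip install playwright jinja2
#   playwright install chromium   # fetches the browser binary Playwright drives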


# ── HELPER #1: Render HTML from template ──
def render_html_output(template_dir, template_name, out_filename, context):
    """
    Renders a Jinja2 HTML template with the given context dict and writes
    the result to out_filename.
    - template_dir: folder where product_template.html lives
    - template_name: "product_template.html"
    - out_filename: e.g. "output_my-product-slug.html"
    - context: {"title": ..., "price": ..., "description": ..., "specifications": {...}}
    """
    env = Environment(
        loader=FileSystemLoader(searchpath=template_dir),
        autoescape=select_autoescape(['html', 'xml'])
    )
    template = env.get_template(template_name)
    rendered = template.render(context)
    # Write the rendered markup to file
    with open(out_filename, "w", encoding="utf-8") as f:
        f.write(rendered)
    print(f"[+] Wrote HTML output to: {out_filename}")
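
# For reference, a minimal product_template.html satisfying the context keys
# above might look like the sketch below (illustrative only; the real template
# is a separate file and may differ). Note the `| safe` filter: `description`
# holds raw HTML, and autoescape is enabled above.
#
#   <html><body>
#     <h1>{{ title }}</h1>
#     <p class="price">{{ price }}</p>
#     <div class="description">{{ description | safe }}</div>
#     <table>
#       {% for key, value in specifications.items() %}
#       <tr><td>{{ key }}</td><td>{{ value }}</td></tr>
#       {% endfor %}
#     </table>
#   </body></html>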


# ── HELPER #2: Create a filesystem-safe slug for each product ──
def slugify(text):
    """
    Simplest slugify: lowercase, then collapse runs of spaces and other
    non-alphanumeric characters into single hyphens.
    """
    text = text.lower().strip()
    slug = re.sub(r'[^a-z0-9]+', '-', text)
    slug = re.sub(r'-{2,}', '-', slug).strip('-')
    return slug or "product"
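
# Example (hypothetical input): slugify("HHP 1000W DE Grow Light!")
# returns "hhp-1000w-de-grow-light".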


async def scrape_product(page, url):
    """
    Navigate to `url`, extract title/price/description/specifications,
    then call render_html_output(...) to write a styled HTML file.
    Returns a dict of the raw data so it can also be saved elsewhere.
    """
    await page.goto(url, timeout=60000)
    # ── EXTRACTION LOGIC ──
    # Adjust these selectors to match the real site structure.
    title = await page.text_content("h1.product-title")
    price = await page.text_content(".product-price")
    description = await page.inner_html("#tab-description")  # raw HTML inside the "Description" tab
    # SPECIFICATIONS: DOM traversal instead of invalid selectors
    specs = await page.evaluate("""() => {
        const items = document.querySelectorAll('.accordion-item');
        for (const item of items) {
            const titleElem = item.querySelector('.accordion-title');
            if (!titleElem) continue;
            const text = titleElem.textContent.toLowerCase();
            if (text.includes('specifications') || text.includes('specs')) {
                const table = item.querySelector('table');
                if (!table) return null;
                const rows = table.querySelectorAll('tr');
                const specs_dict = {};
                rows.forEach(row => {
                    const cells = row.querySelectorAll('td, th');
                    if (cells.length >= 2) {
                        const key = cells[0].innerText.trim();
                        const value = cells[1].innerText.trim();
                        specs_dict[key] = value;
                    }
                });
                return specs_dict;
            }
        }
        return null;
    }""")
    # If no specs section was found, fall back to an empty dict
    if specs is None:
        specs = {}
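    # With a typical two-column spec table, `specs` comes back as e.g.
    # {"Wattage": "1000W", "Input Voltage": "120/240V"} (illustrative values,
    # not taken from the live page).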
    # ── Prepare context data ──
    product_data = {
        "title": title.strip() if title else "No Title",
        "price": price.strip() if price else "N/A",
        "description": description or "",
        "specifications": specs
    }
    # Decide on an output filename
    slug = slugify(product_data["title"])
    html_filename = f"output_{slug}.html"
    # Assume the template is in the same folder as this script:
    template_dir = os.path.dirname(os.path.abspath(__file__))
    render_html_output(
        template_dir=template_dir,
        template_name="product_template.html",
        out_filename=html_filename,
        context=product_data
    )
    return product_data


async def main():
    # ── LIST OF PRODUCT URLs TO SCRAPE ──
    product_urls = [
        "https://www.htgsupply.com/products/digital-greenhouse-hhp-1000w-de-grow-light/#tab-description",
        # Add more URLs as needed
    ]
    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=True)
        page = await browser.new_page()
        all_data = []
        for url in product_urls:
            try:
                data = await scrape_product(page, url)
                all_data.append(data)
                # Optional: sleep a bit to avoid hammering the server;
                # use asyncio.sleep (not time.sleep) so the event loop isn't blocked.
                await asyncio.sleep(random.uniform(1.0, 2.5))
            except Exception as e:
                print(f"[!] Error scraping {url}: {e}")
        # Save all extracted data to a single JSON file as well
        with open("all_products.json", "w", encoding="utf-8") as jf:
            json.dump(all_data, jf, indent=2)
        await browser.close()


if __name__ == "__main__":
    asyncio.run(main())
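
# To run (once the dependencies above are installed):
#   python htg_product_scraper2_merged.py
# Each scraped product yields an output_<slug>.html, plus a combined
# all_products.json holding the raw data.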