Python SDK - Examples

import base64
import os

from wrynai import (
    Engine,
    LinkType,
    ScreenshotType,
    ValidationError,
    WrynAI,
    WrynAIError,
)


def main():
    # Get API key from environment variable
    api_key = os.environ.get("WRYNAI_API_KEY", "your-api-key-here")

    # Initialize the client
    client = WrynAI(api_key=api_key)

    try:
        # =====================================================================
        # Example 1: Extract Page Title
        # =====================================================================
        print("=" * 60)
        print("Example 1: Extract Page Title")
        print("=" * 60)

        result = client.extract_title("https://example.com")
        print(f"Title: {result.title}")
        print()

        # =====================================================================
        # Example 2: Extract Text Content
        # =====================================================================
        print("=" * 60)
        print("Example 2: Extract Text Content")
        print("=" * 60)

        result = client.extract_text(
            url="https://example.com",
            extract_main_content=True,
            engine=Engine.SIMPLE,
        )
        print(f"Text (first 200 chars): {result.text[:200]}...")
        print()

        # =====================================================================
        # Example 3: Extract Structured Text
        # =====================================================================
        print("=" * 60)
        print("Example 3: Extract Structured Text")
        print("=" * 60)

        result = client.extract_structured_text("https://example.com")

        print("Headings:")
        for heading in result.headings[:5]:  # First 5 headings
            print(f" {heading.tag}: {heading.text}")

        print("\nLinks (first 5):")
        for link in result.links[:5]:
            print(f" {link.text}: {link.url}")
        print()

        # =====================================================================
        # Example 4: Extract Markdown
        # =====================================================================
        print("=" * 60)
        print("Example 4: Extract Markdown")
        print("=" * 60)

        result = client.extract_markdown("https://example.com")
        print(f"Markdown (first 300 chars):\n{result.markdown[:300]}...")
        print()

        # =====================================================================
        # Example 5: Extract Links (Internal Only)
        # =====================================================================
        print("=" * 60)
        print("Example 5: Extract Internal Links")
        print("=" * 60)

        result = client.extract_links(
            url="https://example.com",
            links=LinkType.INTERNAL,
        )
        print(f"Found {len(result.links)} internal links")
        for link in result.links[:5]:
            print(f" - {link.url}")
        print()

    except ValidationError as e:
        print(f"Validation Error: {e}")
    except WrynAIError as e:
        print(f"API Error: {e}")
    finally:
        # Always close the client
        client.close()


if __name__ == "__main__":
    main()
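The try/finally pattern above closes the client explicitly. The client can also be used as a context manager, as the later examples on this page do, which closes it automatically even when a call raises. A minimal sketch of the first example rewritten in that style:

def main_with_context_manager():
    api_key = os.environ.get("WRYNAI_API_KEY", "your-api-key-here")

    # The with-block closes the client on exit, including on errors,
    # replacing the explicit try/finally client.close() above.
    with WrynAI(api_key=api_key) as client:
        result = client.extract_title("https://example.com")
        print(f"Title: {result.title}")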

Convert any website into API-ready data

Automatically extract listings and fields

def auto_listing_example():
    """Smart extraction example (PRO feature)."""
    api_key = os.environ.get("WRYNAI_API_KEY", "your-api-key-here")

    with WrynAI(api_key=api_key) as client:
        print("=" * 60)
        print("Smart Extraction (Auto Listing) Example - PRO")
        print("=" * 60)

        try:
            # Extract structured data from a listing page
            result = client.auto_listing(
                url="https://www.amazon.com/s?k=laptop",
                extract_main_content=True,
                timeout_ms=60000,  # 1 minute
            )

            print(f"\nExtracted {len(result.items)} items")
            print()

            # Display extracted items
            for i, item in enumerate(result.items[:5], 1):  # First 5 items
                print(f"Item {i}:")

                # Access item properties dynamically
                if item.get("title"):
                    print(f" Title: {item.get('title')}")
                if item.get("price"):
                    print(f" Price: {item.get('price')}")
                if item.get("rating"):
                    print(f" Rating: {item.get('rating')}")
                if item.get("url"):
                    print(f" URL: {item.get('url')}")
                print()

        except WrynAIError as e:
            print(f"Auto listing extraction failed: {e}")

Take a screenshot

def screenshot_example():
    """Screenshot capture example."""
    api_key = os.environ.get("WRYNAI_API_KEY", "your-api-key-here")

    with WrynAI(api_key=api_key) as client:
        print("=" * 60)
        print("Screenshot Capture Example")
        print("=" * 60)

        try:
            # Capture viewport screenshot
            print("\nCapturing viewport screenshot...")
            result = client.take_screenshot(
                url="https://example.com",
                screenshot_type=ScreenshotType.VIEWPORT,
                timeout_ms=30000,
            )

            # Save the screenshot
            if result.screenshot:
                # Remove data URL prefix if present
                image_data = result.screenshot
                if "," in image_data:
                    image_data = image_data.split(",")[1]

                filename = "screenshot_viewport.png"
                with open(filename, "wb") as f:
                    f.write(base64.b64decode(image_data))
                print(f"Saved viewport screenshot to: {filename}")

            # Capture full page screenshot
            print("\nCapturing full page screenshot...")
            result = client.take_screenshot(
                url="https://example.com",
                screenshot_type=ScreenshotType.FULLPAGE,
                timeout_ms=45000,  # Full page may take longer
            )

            if result.screenshot:
                image_data = result.screenshot
                if "," in image_data:
                    image_data = image_data.split(",")[1]

                filename = "screenshot_fullpage.png"
                with open(filename, "wb") as f:
                    f.write(base64.b64decode(image_data))
                print(f"Saved full page screenshot to: {filename}")

        except WrynAIError as e:
            print(f"Screenshot capture failed: {e}")

Crawl a site with URL patterns

def crawl_documentation_example():
    """Example: Crawl a documentation site."""
    api_key = os.environ.get("WRYNAI_API_KEY", "your-api-key-here")

    with WrynAI(api_key=api_key) as client:
        print("=" * 60)
        print("Documentation Crawl Example")
        print("=" * 60)

        try:
            # Crawl only documentation pages
            result = client.crawl(
                url="https://docs.example.com",
                max_pages=10,
                max_depth=3,
                include_patterns=["/docs/", "/guide/", "/api/"],
                exclude_patterns=["/docs/internal/", "/docs/draft/"],
            )

            print(f"Crawled {result.total_visited} documentation pages")

            # Collect all content for processing
            all_content = []
            for page in result.pages:
                all_content.append(
                    {
                        "url": page.page_url,
                        "content": page.content,
                    }
                )

            # Now you can process the documentation content
            print(f"Collected {len(all_content)} pages of documentation")

        except WrynAIError as e:
            print(f"Documentation crawl failed: {e}")