Compare commits


No commits in common. "master" and "v0.14.4" have entirely different histories.

73 changed files with 2931 additions and 2921 deletions

View File

@ -1,19 +0,0 @@
FROM sherlock/sherlock as sherlock
# Install Node.js
RUN apt-get update; apt-get install curl gpg -y
RUN mkdir -p /etc/apt/keyrings
RUN curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg
RUN echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_20.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list
RUN apt-get update && apt-get install -y curl bash git jq jo xz-utils nodejs
# Install Apify CLI (node.js) for the Actor Runtime
RUN npm -g install apify-cli
# Install Dependencies for the Actor Shell Script
RUN apt-get update && apt-get install -y bash jq jo xz-utils nodejs
# Copy Actor dir with the actorization shell script
COPY .actor/ .actor
ENTRYPOINT [".actor/actor.sh"]

View File

@ -1,93 +0,0 @@
# Sherlock Actor on Apify
[![Sherlock Actor](https://apify.com/actor-badge?actor=netmilk/sherlock)](https://apify.com/netmilk/sherlock?fpr=sherlock)
This Actor wraps the [Sherlock Project](https://sherlockproject.xyz/) to provide serverless username reconnaissance across social networks in the cloud. It helps you find usernames across multiple social media platforms without installing and running the tool locally.
## What are Actors?
[Actors](https://docs.apify.com/platform/actors?fpr=sherlock) are serverless microservices running on the [Apify Platform](https://apify.com/?fpr=sherlock). They are based on the [Actor SDK](https://docs.apify.com/sdk/js?fpr=sherlock) and can be found in the [Apify Store](https://apify.com/store?fpr=sherlock). Learn more about Actors in the [Apify Whitepaper](https://whitepaper.actor?fpr=sherlock).
## Usage
### Apify Console
1. Go to the Apify Actor page
2. Click "Run"
3. In the input form, fill in **Username(s)** to search for
4. The Actor will run and produce its outputs in the default dataset
### Apify CLI
```bash
apify call YOUR_USERNAME/sherlock --input='{
"usernames": ["johndoe", "janedoe"]
}'
```
### Using Apify API
```bash
curl --request POST \
--url "https://api.apify.com/v2/acts/YOUR_USERNAME~sherlock/run" \
--header 'Content-Type: application/json' \
--header 'Authorization: Bearer YOUR_API_TOKEN' \
--data '{
"usernames": ["johndoe", "janedoe"]
}'
```
## Input Parameters
The Actor accepts a JSON schema with the following structure:
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `usernames` | array | Yes | - | List of usernames to search for |
| `usernames[]` | string | Yes | - | A single username to search for |
### Example Input
```json
{
"usernames": ["techuser", "designuser"],
}
```
## Output
The Actor provides three types of outputs:
### Dataset Record
| Field | Type | Required | Description |
|-------|------|----------|-------------|
| `username` | string | Yes | Username the search was conducted for |
| `links` | array | Yes | Array with found links to the social media |
| `links[]` | string | No | URL to the account |
### Example Dataset Item (JSON)
```json
{
"username": "johndoe",
"links": [
"https://github.com/johndoe"
]
}
```
## Performance & Resources
- **Memory Requirements**:
- Minimum: 512 MB RAM
- Recommended: 1 GB RAM for multiple usernames
- **Processing Time**:
- Single username: ~1-2 minutes
- Multiple usernames: 2-5 minutes
- Varies based on number of sites checked and response times
For more help, check the [Sherlock Project documentation](https://github.com/sherlock-project/sherlock) or raise an issue in the Actor's repository.

View File

@ -1,13 +0,0 @@
{
"actorSpecification": 1,
"name": "sherlock",
"version": "0.0",
"buildTag": "latest",
"environmentVariables": {},
"dockerFile": "./Dockerfile",
"dockerContext": "../",
"input": "./input_schema.json",
"storages": {
"dataset": "./dataset_schema.json"
}
}

View File

@ -1,14 +0,0 @@
#!/bin/bash
INPUT=`apify actor:get-input | jq -r .usernames[] | xargs echo`
echo "INPUT: $INPUT"
sherlock $INPUT
for username in $INPUT; do
# escape the special meaning leading characters
# https://github.com/jpmens/jo/blob/master/jo.md#description
safe_username=$(echo $username | sed 's/^@/\\@/' | sed 's/^:/\\:/' | sed 's/%/\\%/')
echo "pushing results for username: $username, content:"
cat $username.txt
sed '$d' $username.txt | jo -a | jo username=$safe_username links:=- | apify actor:push-data
done
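
The `jo`/`jq` pipeline above converts each per-username text file that Sherlock writes into a single dataset record. A minimal sketch of that transformation with made-up file contents; the final summary line is what `sed '$d'` strips:

```bash
# Illustrative input: Sherlock writes one URL per line, then a summary line.
cat > user123.txt <<'EOF'
https://github.com/user123
https://www.reddit.com/user/user123
Total Websites Username Detected On : 2
EOF

# Same pipeline as in actor.sh, minus the push to Apify:
sed '$d' user123.txt | jo -a | jo username=user123 links:=-
# Expected record shape, matching the Actor README's example dataset item:
# {"username":"user123","links":["https://github.com/user123","https://www.reddit.com/user/user123"]}
```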

View File

@ -1,45 +0,0 @@
{
"actorSpecification": 1,
"fields":{
"title": "Sherlock actor input",
"description": "This is actor input schema",
"type": "object",
"schemaVersion": 1,
"properties": {
"links": {
"title": "Links to accounts",
"type": "array",
"description": "A list of social media accounts found for the uername"
},
"username": {
"title": "Lookup username",
"type": "string",
"description": "Username the lookup was performed for"
}
},
"required": [
"username",
"links"
]
},
"views": {
"overview": {
"title": "Overview",
"transformation": {
"fields": [
"username",
"links"
]
},
"display": {
"component": "table",
"links": {
"label": "Links"
},
"username":{
"label": "Username"
}
}
}
}
}

View File

@ -1,18 +0,0 @@
{
"title": "Sherlock actor input",
"description": "This is actor input schema",
"type": "object",
"schemaVersion": 1,
"properties": {
"usernames": {
"title": "Usernames to hunt down",
"type": "array",
"description": "A list of usernames to be checked for existence across social media",
"editor": "stringList",
"prefill": ["johndoe"]
}
},
"required": [
"usernames"
]
}

View File

@ -5,4 +5,4 @@ tests/
*.txt
!/requirements.txt
venv/
devel/

.github/CODEOWNERS vendored
View File

@ -1,15 +0,0 @@
### REPOSITORY
/.github/CODEOWNERS @sdushantha @ppfeister
/.github/FUNDING.yml @sdushantha
/LICENSE @sdushantha
### PACKAGING
# Changes made to these items without code owner approval may negatively
# impact packaging pipelines.
/pyproject.toml @ppfeister @sdushantha
### REGRESSION
/.github/workflows/regression.yml @ppfeister
/tox.ini @ppfeister
/pytest.ini @ppfeister
/tests/ @ppfeister

.github/FUNDING.yml vendored
View File

@ -1 +0,0 @@
github: [ sdushantha, ppfeister, matheusfelipeog ]

.github/ISSUE_TEMPLATE/bug-report.md vendored Normal file
View File

@ -0,0 +1,38 @@
---
name: Bug report
about: Report a bug in Sherlock's functionality
title: ''
labels: bug
assignees: ''
---
<!--
######################################################################
WARNING!
IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE
######################################################################
-->
## Checklist
<!--
Put x into all boxes (like this [x]) once you have completed what they say.
Make sure to complete everything in the checklist.
-->
- [ ] I'm reporting a bug in Sherlock's functionality
- [ ] The bug I'm reporting is not a false positive or a false negative
- [ ] I've verified that I'm running the latest version of Sherlock
- [ ] I've checked for similar bug reports including closed ones
- [ ] I've checked for pull requests that attempt to fix this bug
## Description
<!--
Provide a detailed description of the bug that you have found in Sherlock.
Provide the version of Sherlock you are running.
-->
WRITE DESCRIPTION HERE

View File

@ -1,71 +0,0 @@
name: Bug report
description: File a bug report
labels: ["bug"]
body:
- type: dropdown
id: package
attributes:
label: Installation method
description: |
Some packages are maintained by the community, rather than by the Sherlock Project.
Knowing which packages are affected helps us diagnose package-specific bugs.
options:
- Select one
- PyPI (via pip)
- Homebrew
- Docker
- Kali repository (via apt)
- Built from source
- Other (indicate below)
validations:
required: true
- type: input
id: package-version
attributes:
label: Package version
description: |
Knowing the version of the package you are using can help us diagnose your issue more quickly.
You can find the version by running `sherlock --version`.
validations:
required: true
- type: textarea
id: description
attributes:
label: Description
description: |
Detailed descriptions that help contributors understand and reproduce your bug are much more likely to lead to a fix.
Please include the following information:
- What you were trying to do
- What you expected to happen
- What actually happened
placeholder: |
When doing {action}, the expected result should be {expected result}.
When doing {action}, however, the actual result was {actual result}.
This is undesirable because {reason}.
validations:
required: true
- type: textarea
id: steps-to-reproduce
attributes:
label: Steps to reproduce
description: Write a step by step list that will allow us to reproduce this bug.
placeholder: |
1. Do something
2. Then do something else
validations:
required: true
- type: textarea
id: additional-info
attributes:
label: Additional information
description: If you have some additional information, please write it here.
validations:
required: false
- type: checkboxes
id: terms
attributes:
label: Code of Conduct
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/sherlock-project/sherlock/blob/master/docs/CODE_OF_CONDUCT.md).
options:
- label: I agree to follow this project's Code of Conduct
required: true

View File

@ -1 +0,0 @@
blank_issues_enabled: false

View File

@ -1,27 +0,0 @@
name: False negative
description: Report a site that is returning false negative results
title: "False negative for: "
labels: ["false negative"]
body:
- type: markdown
attributes:
value: |
Please include the site name in the title of your issue.
Submit **one site per report** for faster resolution. If you have multiple sites in the same report, it often takes longer to fix.
- type: textarea
id: additional-info
attributes:
label: Additional info
description: If you know why the site is returning false negatives, or noticed any patterns, please explain.
placeholder: |
Reddit is returning false negatives because...
validations:
required: false
- type: checkboxes
id: terms
attributes:
label: Code of Conduct
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/sherlock-project/sherlock/blob/master/docs/CODE_OF_CONDUCT.md).
options:
- label: I agree to follow this project's Code of Conduct
required: true

View File

@ -1,28 +0,0 @@
name: False positive
description: Report a site that is returning false positive results
title: "False positive for: "
labels: ["false positive"]
body:
- type: markdown
attributes:
value: |
Please include the site name in the title of your issue.
Submit **one site per report** for faster resolution. If you have multiple sites in the same report, it often takes longer to fix.
- type: textarea
id: additional-info
attributes:
label: Additional info
description: If you know why the site is returning false positives, or noticed any patterns, please explain.
placeholder: |
Reddit is returning false positives because...
False positives only occur after x searches...
validations:
required: false
- type: checkboxes
id: terms
attributes:
label: Code of Conduct
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/sherlock-project/sherlock/blob/master/docs/CODE_OF_CONDUCT.md).
options:
- label: I agree to follow this project's Code of Conduct
required: true

View File

@ -0,0 +1,32 @@
---
name: Feature request
about: Request a new functionality for Sherlock
title: ''
labels: enhancement
assignees: ''
---
<!--
######################################################################
WARNING!
IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE
######################################################################
-->
## Checklist
<!--
Put x into all boxes (like this [x]) once you have completed what they say.
Make sure to complete everything in the checklist.
-->
- [ ] I'm reporting a feature request
- [ ] I've checked for similar feature requests including closed ones
## Description
<!--
Provide a detailed description of the feature you would like Sherlock to have
-->
WRITE DESCRIPTION HERE

View File

@ -1,24 +0,0 @@
name: Feature request
description: Request a feature or enhancement
labels: ["enhancement"]
body:
- type: markdown
attributes:
value: |
Concise and thoughtful titles help other contributors find and add your requested feature.
- type: textarea
id: description
attributes:
label: Description
description: Describe the feature you are requesting
placeholder: I'd like Sherlock to be able to do xyz
validations:
required: true
- type: checkboxes
id: terms
attributes:
label: Code of Conduct
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/sherlock-project/sherlock/blob/master/docs/CODE_OF_CONDUCT.md).
options:
- label: I agree to follow this project's Code of Conduct
required: true

.github/ISSUE_TEMPLATE/question.md vendored Normal file
View File

@ -0,0 +1,33 @@
---
name: Question
about: Ask us a question
title: ''
labels: question
assignees: ''
---
<!--
######################################################################
WARNING!
IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE.
######################################################################
-->
## Checklist
<!--
Put x into all boxes (like this [x]) once you have completed what they say.
Make sure to complete everything in the checklist.
-->
- [ ] I'm asking a question regarding Sherlock
- [ ] My question is not a tech support question.
**We are not your tech support**.
If you have questions related to `pip`, `git`, or something that is not related to Sherlock, please ask them on [Stack Overflow](https://stackoverflow.com/) or [r/learnpython](https://www.reddit.com/r/learnpython/)
## Question
ASK YOUR QUESTION HERE

View File

@ -0,0 +1,34 @@
---
name: Reporting false negative
about: Reporting a site that is returning false negatives
title: ''
labels: false negative
assignees: ''
---
<!--
######################################################################
WARNING!
IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE
######################################################################
-->
## Checklist
<!--
Put x into all boxes (like this [x]) once you have completed what they say.
Make sure to complete everything in the checklist.
-->
- [ ] I'm reporting a website that is returning **false negative** results
- [ ] I've checked for similar site support requests including closed ones
- [ ] I've checked for pull requests attempting to fix this false negative
- [ ] I'm only reporting **one** site (create a separate issue for each site)
## Description
<!--
Provide the username that is causing Sherlock to return a false negative, along with any other information that might help us fix this false negative.
-->
WRITE DESCRIPTION HERE

View File

@ -0,0 +1,34 @@
---
name: Reporting false positive
about: Reporting a site that is returning false positives
title: ''
labels: false positive
assignees: ''
---
<!--
######################################################################
WARNING!
IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE
######################################################################
-->
## Checklist
<!--
Put x into all boxes (like this [x]) once you have completed what they say.
Make sure complete everything in the checklist.
-->
- [ ] I'm reporting a website that is returning **false positive** results
- [ ] I've checked for similar site support requests including closed ones
- [ ] I've checked for pull requests attempting to fix this false positive
- [ ] I'm only reporting **one** site (create a separate issue for each site)
## Description
<!--
Provide the username that is causing Sherlock to return a false positive, along with any other information that might help us fix this false positive.
-->
WRITE DESCRIPTION HERE

View File

@ -1,35 +0,0 @@
name: Request a new website
description: Request that Sherlock add support for a new website
title: "Requesting support for: "
labels: ["site support request"]
body:
- type: markdown
attributes:
value: |
Ensure that the site name is in the title of your request. Requests without this information will be **closed**.
- type: input
id: site-url
attributes:
label: Site URL
description: |
What is the URL of the website indicated in your title?
Websites sometimes have similar names. This helps contributors find the correct site.
placeholder: https://reddit.com
validations:
required: true
- type: textarea
id: additional-info
attributes:
label: Additional info
description: If you have suggestions on how Sherlock should detect usernames, please explain below
placeholder: Sherlock can detect if a username exists on Reddit by checking for...
validations:
required: false
- type: checkboxes
id: terms
attributes:
label: Code of Conduct
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/sherlock-project/sherlock/blob/master/docs/CODE_OF_CONDUCT.md).
options:
- label: I agree to follow this project's Code of Conduct
required: true

View File

@ -0,0 +1,37 @@
---
name: Site support request
about: Request support for a new site
title: ''
labels: site support request
assignees: ''
---
<!--
######################################################################
WARNING!
IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE
######################################################################
-->
## Checklist
<!--
Put x into all boxes (like this [x]) once you have completed what they say.
Make sure to complete everything in the checklist.
-->
- [ ] I'm requesting support for a new site
- [ ] I've checked for similar site support requests including closed ones
- [ ] I've checked that the site I am requesting has not been removed in the past and is not documented in [removed_sites.md](https://github.com/sherlock-project/sherlock/blob/master/removed_sites.md)
- [ ] The site I am requesting support for is not a pornographic website
- [ ] I'm only requesting support of **one** website (create a separate issue for each site)
## Description
<!--
Provide the url to the website and the name of the website.
If there is anything else you want to mention regarding the site support request include that in this section.
-->
URL:

.github/SECURITY.md vendored
View File

@ -1,11 +0,0 @@
## Security Policy
### Supported Versions
Sherlock is a forward looking project. Only the latest and most current version is supported.
### Reporting a Vulnerability
Security concerns can be submitted [__here__][report-url] without risk of exposing sensitive information. For issues that are low severity or unlikely to see exploitation, public issues are often acceptable.
[report-url]: https://github.com/sherlock-project/sherlock/security/advisories/new

View File

@ -1,89 +0,0 @@
name: Exclusions Updater
on:
schedule:
#- cron: '0 5 * * 0' # Runs at 05:00 every Sunday
- cron: '0 5 * * *' # Runs at 05:00 every day
workflow_dispatch:
jobs:
update-exclusions:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v5
- name: Set up Python
uses: actions/setup-python@v6
with:
python-version: '3.13'
- name: Install Poetry
uses: abatilo/actions-poetry@v4
with:
poetry-version: 'latest'
- name: Install dependencies
run: |
poetry install --no-interaction --with dev
- name: Run false positive tests
run: |
$(poetry env activate)
pytest -q --tb no -m validate_targets_fp -n 20 | tee fp_test_results.txt
deactivate
- name: Parse false positive detections by desired categories
run: |
grep -oP '(?<=test_false_pos\[)[^\]]+(?=\].*result was Claimed)' fp_test_results.txt \
| sort -u > false_positive_exclusions.txt
grep -oP '(?<=test_false_pos\[)[^\]]+(?=\].*result was WAF)' fp_test_results.txt \
| sort -u > waf_hits.txt
- name: Detect if exclusions list changed
id: detect_changes
run: |
git fetch origin exclusions || true
if git show origin/exclusions:false_positive_exclusions.txt >/dev/null 2>&1; then
# If the exclusions branch and file exist, compare
if git diff --quiet origin/exclusions -- false_positive_exclusions.txt; then
echo "exclusions_changed=false" >> "$GITHUB_OUTPUT"
else
echo "exclusions_changed=true" >> "$GITHUB_OUTPUT"
fi
else
# If the exclusions branch or file do not exist, treat as changed
echo "exclusions_changed=true" >> "$GITHUB_OUTPUT"
fi
- name: Quantify and display results
run: |
FP_COUNT=$(wc -l < false_positive_exclusions.txt | xargs)
WAF_COUNT=$(wc -l < waf_hits.txt | xargs)
echo ">>> Found $FP_COUNT false positives and $WAF_COUNT WAF hits."
echo ">>> False positive exclusions:" && cat false_positive_exclusions.txt
echo ">>> WAF hits:" && cat waf_hits.txt
- name: Commit and push exclusions list
if: steps.detect_changes.outputs.exclusions_changed == 'true'
run: |
git config user.name "Paul Pfeister (automation)"
git config user.email "code@pfeister.dev"
mv false_positive_exclusions.txt false_positive_exclusions.txt.tmp
git add -f false_positive_exclusions.txt.tmp # -f required to override .gitignore
git stash push -m "stash false positive exclusion list" -- false_positive_exclusions.txt.tmp
git fetch origin exclusions || true # Allows creation of branch if deleted
git checkout -B exclusions origin/exclusions || (git checkout --orphan exclusions && git rm -rf .)
git stash pop || true
mv false_positive_exclusions.txt.tmp false_positive_exclusions.txt
git rm -f false_positive_exclusions.txt.tmp || true
git add false_positive_exclusions.txt
git commit -m "auto: update exclusions list" || echo "No changes to commit"
git push origin exclusions

.github/workflows/main.yml vendored Normal file
View File

@ -0,0 +1,38 @@
name: Tests
on:
push:
branches: [ master ]
jobs:
tests:
runs-on: ubuntu-latest
strategy:
matrix:
python-version:
- '3.12'
- '3.11'
- '3.10'
- '3.9'
- '3.8'
- '3.7'
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install Dependencies
run: |
python -m pip install --upgrade pip
pip install ruff flake8 pytest
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- name: Lint with ruff
run: |
# stop the build if there are Python syntax errors or undefined names
ruff . --output-format=github --select=E9,F63,F7,F82
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Sherlock Site Detect Tests
run: |
cd sherlock && python -m unittest tests.all.SherlockDetectTests --verbose

.github/workflows/nightly.yml vendored Normal file
View File

@ -0,0 +1,27 @@
name: Nightly
on:
schedule:
# Run Nightly Tests At 3AM (The Hour Of The Wolf) Every Day
- cron: '0 3 * * *'
jobs:
tests:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: [3.x]
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install Dependencies
run: |
python -m pip install --upgrade pip
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- name: Sherlock Site Coverage Tests
run: |
cd sherlock && python -m unittest tests.all.SherlockSiteCoverageTests --verbose

.github/workflows/pull_request.yml vendored Normal file
View File

@ -0,0 +1,47 @@
name: Pull Request Action
on:
pull_request:
branches: [ master ]
jobs:
getchange:
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.changes.outputs.matrix }}
steps:
- id: changes
run: |
URL="https://api.github.com/repos/sherlock-project/sherlock/pulls/${{ github.event.pull_request.number }}/files"
FILES=$(curl -s -X GET -G $URL | jq -r '.[] | .filename')
if echo $FILES | grep -q ".json"; then
echo "::set-output name=matrix::{\"include\":[{\"python\":\"3.x\"}]}"
else
echo "::set-output name=matrix::{\"include\":[{\"python\":\"3.7\"},{\"python\":\"3.8\"}]},{\"python\":\"3.9\"},{\"python\":\"3.10\"}]},{\"python\":\"3.11\"},{\"python\":\"3.12\"}]}"
fi
tests:
needs: [getchange]
runs-on: ubuntu-latest
strategy:
matrix: ${{ fromJson(needs.getchange.outputs.matrix) }}
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python }}
- name: Install Dependencies
run: |
python -m pip install --upgrade pip
pip install ruff flake8 pytest
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- name: Lint With Ruff
run: |
# stop the build if there are Python syntax errors or undefined names
ruff check . --output-format=github --select=E9,F63,F7,F82
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Sherlock Site Detect Tests
run: |
cd sherlock && python -m unittest tests.all.SherlockDetectTests --verbose

View File

@ -1,92 +0,0 @@
name: Regression Testing
on:
pull_request:
branches:
- master
- release/**
paths:
- '.github/workflows/regression.yml'
- '**/*.json'
- '**/*.py'
- '**/*.ini'
- '**/*.toml'
- 'Dockerfile'
push:
branches:
- master
- release/**
paths:
- '.github/workflows/regression.yml'
- '**/*.json'
- '**/*.py'
- '**/*.ini'
- '**/*.toml'
- 'Dockerfile'
jobs:
tox-lint:
runs-on: ubuntu-latest
# Linting is run through tox to ensure that the same linter
# is used by local runners
steps:
- uses: actions/checkout@v4
- name: Set up linting environment
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Install tox and related dependencies
run: |
python -m pip install --upgrade pip
pip install tox
- name: Run tox linting environment
run: tox -e lint
tox-matrix:
runs-on: ${{ matrix.os }}
strategy:
# We want to know which specific versions it fails on
fail-fast: false
matrix:
os: [
ubuntu-latest,
windows-latest,
macos-latest,
]
python-version: [
'3.10',
'3.11',
'3.12',
'3.13',
]
steps:
- uses: actions/checkout@v4
- name: Set up environment ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install tox and related dependencies
run: |
python -m pip install --upgrade pip
pip install tox
pip install tox-gh-actions
- name: Run tox
run: tox
docker-build-test:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Get version from pyproject.toml
id: get-version
run: |
VERSION=$(grep -m1 'version = ' pyproject.toml | cut -d'"' -f2)
echo "version=$VERSION" >> $GITHUB_OUTPUT
- name: Build Docker image
run: |
docker build \
--build-arg VERSION_TAG=${{ steps.get-version.outputs.version }} \
-t sherlock-test:latest .
- name: Test Docker image runs
run: docker run --rm sherlock-test:latest --version

View File

@ -1,13 +1,13 @@
name: Update Site List
name: Update Site List
# Trigger the workflow when changes are pushed to the main branch
# and the changes include the sherlock_project/resources/data.json file
# and the changes include the sherlock/resources/data.json file
on:
push:
branches:
- master
- master
paths:
- sherlock_project/resources/data.json
- sherlock/resources/data.json
jobs:
sync-json-data:
@ -26,21 +26,24 @@ jobs:
- name: Install Python
uses: actions/setup-python@v5
with:
python-version: '3.x'
python-version: '3.x'
# Execute the site_list.py Python script
- name: Execute site-list.py
run: python devel/site-list.py
- name: Execute site_list.py
run: python site_list.py
- name: Pushes to another repository
uses: sdushantha/github-action-push-to-another-repository@main
env:
SSH_DEPLOY_KEY: ${{ secrets.SSH_DEPLOY_KEY }}
API_TOKEN_GITHUB: ${{ secrets.API_TOKEN_GITHUB }}
# Commit any changes made by the script
- name: Commit files
run: |
git config --local user.email "41898282+github-actions[bot]@users.noreply.github.com"
git config --local user.name "github-actions[bot]"
if ! git diff --exit-code; then
git commit -a -m "Updated Site List"
fi
# Push the changes to the remote repository
- name: Push changes
uses: ad-m/github-push-action@master
with:
source-directory: 'output'
destination-github-username: 'sherlock-project'
commit-message: 'Updated site list'
destination-repository-name: 'sherlockproject.xyz'
user-email: siddharth.dushantha@gmail.com
target-branch: master
github_token: ${{ secrets.GITHUB_TOKEN }}
branch: ${{ github.ref }}

View File

@ -1,126 +0,0 @@
name: Modified Target Validation
on:
pull_request_target:
branches:
- master
paths:
- "sherlock_project/resources/data.json"
jobs:
validate-modified-targets:
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@v5
with:
# Checkout the base branch but fetch all history to avoid a second fetch call
ref: ${{ github.base_ref }}
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v6
with:
python-version: "3.13"
- name: Install Poetry
uses: abatilo/actions-poetry@v4
with:
poetry-version: "latest"
- name: Install dependencies
run: |
poetry install --no-interaction --with dev
- name: Prepare JSON versions for comparison
run: |
# Fetch only the PR's branch head (single network call in this step)
git fetch origin pull/${{ github.event.pull_request.number }}/head:pr
# Find the merge-base commit between the target branch and the PR branch
MERGE_BASE=$(git merge-base origin/${{ github.base_ref }} pr)
echo "Comparing PR head against merge-base commit: $MERGE_BASE"
# Safely extract the file from the PR's head and the merge-base commit
git show pr:sherlock_project/resources/data.json > data.json.head
git show $MERGE_BASE:sherlock_project/resources/data.json > data.json.base
# CRITICAL FIX: Overwrite the checked-out data.json with the one from the PR
# This ensures that pytest runs against the new, updated file.
cp data.json.head sherlock_project/resources/data.json
- name: Discover modified targets
id: discover-modified
run: |
CHANGED=$(
python - <<'EOF'
import json
import sys
try:
with open("data.json.base") as f: base = json.load(f)
with open("data.json.head") as f: head = json.load(f)
except FileNotFoundError as e:
print(f"Error: Could not find {e.filename}", file=sys.stderr)
sys.exit(1)
except json.JSONDecodeError as e:
print(f"Error: Could not decode JSON from a file - {e}", file=sys.stderr)
sys.exit(1)
changed = []
for k, v in head.items():
if k not in base or base[k] != v:
changed.append(k)
print(",".join(sorted(changed)))
EOF
)
# Preserve changelist
echo -e ">>> Changed targets: \n$(echo $CHANGED | tr ',' '\n')"
echo "changed_targets=$CHANGED" >> "$GITHUB_OUTPUT"
- name: Validate remote manifest against local schema
if: steps.discover-modified.outputs.changed_targets != ''
run: |
poetry run pytest tests/test_manifest.py::test_validate_manifest_against_local_schema
# --- The rest of the steps below are unchanged ---
- name: Validate modified targets
if: steps.discover-modified.outputs.changed_targets != ''
continue-on-error: true
run: |
poetry run pytest -q --tb no -rA -m validate_targets -n 20 \
--chunked-sites "${{ steps.discover-modified.outputs.changed_targets }}" \
--junitxml=validation_results.xml
- name: Prepare validation summary
if: steps.discover-modified.outputs.changed_targets != ''
id: prepare-summary
run: |
summary=$(
poetry run python devel/summarize_site_validation.py validation_results.xml || echo "Failed to generate summary of test results"
)
echo "$summary" > validation_summary.md
- name: Announce validation results
if: steps.discover-modified.outputs.changed_targets != ''
uses: actions/github-script@v8
with:
script: |
const fs = require('fs');
const body = fs.readFileSync('validation_summary.md', 'utf8');
await github.rest.issues.createComment({
issue_number: context.payload.pull_request.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: body,
});
- name: This step shows as run when no modifications are found
if: steps.discover-modified.outputs.changed_targets == ''
run: |
echo "No modified targets found"

.gitignore vendored
View File

@ -1,13 +1,8 @@
# Virtual Environments
# Virtual Environment
venv/
bin/
lib/
pyvenv.cfg
poetry.lock
# Regression Testing
.coverage
.tox/
# Editor Configurations
.vscode/
@ -19,10 +14,6 @@ __pycache__/
# Pip
src/
# Devel, Build, and Installation
*.egg-info/
dist/**
# Jupyter Notebook
.ipynb_checkpoints
*.ipynb

CONTRIBUTING.md Normal file
View File

@ -0,0 +1,31 @@
# How To Contribute To Sherlock
First off, thank you for the help!
There are many ways to contribute. Here is a high-level grouping.
## Adding New Sites
Please look at the Wiki entry on
[adding new sites](https://github.com/sherlock-project/sherlock/wiki/Adding-Sites-To-Sherlock)
to understand the issues.
Any new site that is added needs both a claimed username and an unclaimed username
documented in the site data. This allows the regression tests
to ensure that everything is working.
It is required that a contributor test any new sites by either running the full tests, or running
a site-specific query against the claimed and unclaimed usernames.
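
A sketch of that site-specific check, using the `--site` and `--print-all` options from Sherlock's `--help` output (shown in the README elsewhere in this compare); the site name and usernames are placeholders:

```bash
# Run the new site against a username known to exist and one known not to;
# --print-all also shows "not found" results for the unclaimed check.
python3 sherlock --site GitHub claimed_username_here
python3 sherlock --site GitHub unclaimed_username_here --print-all
```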
It is not required that a contributor run the
[site_list.py](https://github.com/sherlock-project/sherlock/blob/master/site_list.py)
script.
If there are performance problems with a site (e.g. slow to respond, unreliable uptime, ...), then
the site may be removed from the list. The
[removed_sites.md](https://github.com/sherlock-project/sherlock/blob/master/removed_sites.md)
file contains sites that were included at one time in Sherlock, but had to be removed for
one reason or another.
## Adding New Functionality
Please ensure that the content on your branch passes all tests before submitting a pull request.

View File

@ -1,31 +1,26 @@
# Release instructions:
# 1. Update the version tag in the Dockerfile to match the version in sherlock/__init__.py
# 2. Update the VCS_REF tag to match the tagged version's FULL commit hash
# 3. Build image with BOTH latest and version tags
# i.e. `docker build -t sherlock/sherlock:0.16.0 -t sherlock/sherlock:latest .`
FROM python:3.11-slim-bullseye as build
WORKDIR /wheels
FROM python:3.12-slim-bullseye AS build
WORKDIR /sherlock
COPY requirements.txt /opt/sherlock/
RUN apt-get update \
&& apt-get install -y build-essential \
&& pip3 wheel -r /opt/sherlock/requirements.txt
RUN pip3 install --no-cache-dir --upgrade pip
FROM python:3.11-slim-bullseye
WORKDIR /opt/sherlock
FROM python:3.12-slim-bullseye
WORKDIR /sherlock
ARG VCS_REF= # CHANGE ME ON UPDATE
ARG VCS_REF
ARG VCS_URL="https://github.com/sherlock-project/sherlock"
ARG VERSION_TAG= # CHANGE ME ON UPDATE
ENV SHERLOCK_ENV=docker
LABEL org.label-schema.vcs-ref=$VCS_REF \
org.label-schema.vcs-url=$VCS_URL \
org.label-schema.name="Sherlock" \
org.label-schema.version=$VERSION_TAG \
website="https://sherlockproject.xyz"
org.label-schema.vcs-url=$VCS_URL
RUN pip3 install --no-cache-dir sherlock-project==$VERSION_TAG
COPY --from=build /wheels /wheels
COPY . /opt/sherlock/
WORKDIR /sherlock
RUN pip3 install --no-cache-dir -r requirements.txt -f /wheels \
&& rm -rf /wheels
ENTRYPOINT ["sherlock"]
WORKDIR /opt/sherlock/sherlock
ENTRYPOINT ["python", "sherlock.py"]

README.md Normal file
View File

@ -0,0 +1,184 @@
<p align=center>
<br>
<a href="https://sherlock-project.github.io/" target="_blank"><img src="https://user-images.githubusercontent.com/27065646/53551960-ae4dff80-3b3a-11e9-9075-cef786c69364.png"/></a>
<br>
<span>Hunt down social media accounts by username across <a href="https://github.com/sherlock-project/sherlock/blob/master/sites.md">social networks</a></span>
<br>
</p>
<p align="center">
<a href="#installation">Installation</a>
&nbsp;&nbsp;&nbsp;|&nbsp;&nbsp;&nbsp;
<a href="#usage">Usage</a>
&nbsp;&nbsp;&nbsp;|&nbsp;&nbsp;&nbsp;
<a href="#docker-notes">Docker Notes</a>
&nbsp;&nbsp;&nbsp;|&nbsp;&nbsp;&nbsp;
<a href="#contributing">Contributing</a>
</p>
<p align="center">
<img width="70%" height="70%" src="https://user-images.githubusercontent.com/27065646/219638267-a5e11090-aa6e-4e77-87f7-0e95f6ad5978.png"/>
</a>
</p>
## Installation
```console
# clone the repo
$ git clone https://github.com/sherlock-project/sherlock.git
# change the working directory to sherlock
$ cd sherlock
# install the requirements
$ python3 -m pip install -r requirements.txt
```
## Usage
```console
$ python3 sherlock --help
usage: sherlock [-h] [--version] [--verbose] [--folderoutput FOLDEROUTPUT]
[--output OUTPUT] [--tor] [--unique-tor] [--csv] [--xlsx]
[--site SITE_NAME] [--proxy PROXY_URL] [--json JSON_FILE]
[--timeout TIMEOUT] [--print-all] [--print-found] [--no-color]
[--browse] [--local] [--nsfw]
USERNAMES [USERNAMES ...]
Sherlock: Find Usernames Across Social Networks (Version 0.14.3)
positional arguments:
USERNAMES One or more usernames to check with social networks.
Check similar usernames using {?} (replace to '_', '-', '.').
optional arguments:
-h, --help show this help message and exit
--version Display version information and dependencies.
--verbose, -v, -d, --debug
Display extra debugging information and metrics.
--folderoutput FOLDEROUTPUT, -fo FOLDEROUTPUT
If using multiple usernames, the output of the results will be
saved to this folder.
--output OUTPUT, -o OUTPUT
If using single username, the output of the result will be saved
to this file.
--tor, -t Make requests over Tor; increases runtime; requires Tor to be
installed and in system path.
--unique-tor, -u Make requests over Tor with new Tor circuit after each request;
increases runtime; requires Tor to be installed and in system
path.
--csv Create Comma-Separated Values (CSV) File.
--xlsx Create the standard file for the modern Microsoft Excel
spreadsheet (xlsx).
--site SITE_NAME Limit analysis to just the listed sites. Add multiple options to
specify more than one site.
--proxy PROXY_URL, -p PROXY_URL
Make requests over a proxy. e.g. socks5://127.0.0.1:1080
--json JSON_FILE, -j JSON_FILE
Load data from a JSON file or an online, valid, JSON file.
--timeout TIMEOUT Time (in seconds) to wait for response to requests (Default: 60)
--print-all Output sites where the username was not found.
--print-found Output sites where the username was found.
--no-color Don't color terminal output
--browse, -b Browse to all results on default browser.
--local, -l Force the use of the local data.json file.
--nsfw Include checking of NSFW sites from default list.
```
To search for only one user:
```
python3 sherlock user123
```
To search for more than one user:
```
python3 sherlock user1 user2 user3
```
Accounts found will be stored in an individual text file with the corresponding username (e.g. ```user123.txt```).
## Anaconda (Windows) Notes
If you are using Anaconda in Windows, using `python3` might not work. Use `python` instead.
## Docker Notes
If docker is installed you can build an image and run this as a container.
```
docker build -t mysherlock-image .
```
Once the image is built, sherlock can be invoked by running the following:
```
docker run --rm -t mysherlock-image user123
```
Use the following command to access the saved results:
```
docker run --rm -t -v "$PWD/results:/opt/sherlock/results" mysherlock-image -o /opt/sherlock/results/text.txt user123
```
Docker is instructed to create (or use) the folder `results` in the current working directory and to mount it at `/opt/sherlock/results` in the Docker container by using the ```-v "$PWD/results:/opt/sherlock/results"``` option. `Sherlock` is instructed to export the result using the `-o /opt/sherlock/results/text.txt` option.
### Using `docker-compose`
You can use the `docker-compose.yml` file from the repository and use this command:
```
docker-compose run sherlock -o /opt/sherlock/results/text.txt user123
```
## Contributing
We would love to have you help us with the development of Sherlock. Each and every contribution is greatly valued!
Here are some things we would appreciate your help on:
- Addition of new site support ¹
- Bringing back site support of [sites that have been removed](removed_sites.md) in the past due to false positives
[1] Please look at the Wiki entry on [adding new sites](https://github.com/sherlock-project/sherlock/wiki/Adding-Sites-To-Sherlock)
to understand the issues.
## Tests
Thank you for contributing to Sherlock!
Before creating a pull request with new development, please run the tests
to ensure that everything is working great. It would also be a good idea to run the tests
before starting development to distinguish problems between your
environment and the Sherlock software.
The following is an example of the command line to run all the tests for
Sherlock. This invocation hides the progress text that Sherlock normally
outputs, and instead shows the verbose output of the tests.
```console
$ cd sherlock/sherlock
$ python3 -m unittest tests.all --verbose
```
Note that we do currently have 100% test coverage. Unfortunately, some of
the sites that Sherlock checks are not always reliable, so it is common
to get response problems. Any problems in connection will show up as
warnings in the tests instead of true errors.
If some sites are failing due to connection problems (site is down, in maintenance, etc)
you can exclude them from tests by creating a `tests/.excluded_sites` file with a
list of sites to ignore (one site name per line).
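
A minimal sketch of such a file, with purely illustrative site names:

```bash
# Example only: one site name per line, matching site names in data.json.
cat > tests/.excluded_sites <<'EOF'
Instagram
Facebook
EOF
```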
## Star History
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=sherlock-project/sherlock&type=Date&theme=dark" />
<source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=sherlock-project/sherlock&type=Date" />
<img alt="Sherlock Project Star History Chart" src="https://api.star-history.com/svg?repos=sherlock-project/sherlock&type=Date" />
</picture>
## License
MIT © Sherlock Project<br/>
Original Creator - [Siddharth Dushantha](https://github.com/sdushantha)

View File

@ -1,45 +0,0 @@
#!/usr/bin/env python
# This module generates the listing of supported sites which can be found in
# sites.mdx. It also organizes all the sites in alphanumeric order
import json
import os
DATA_REL_URI: str = "sherlock_project/resources/data.json"
DEFAULT_ENCODING = "utf-8"
# Read the data.json file
with open(DATA_REL_URI, "r", encoding=DEFAULT_ENCODING) as data_file:
data: dict = json.load(data_file)
# Removes schema-specific keywords for proper processing
social_networks = data.copy()
social_networks.pop('$schema', None)
# Sort the social networks in alphanumeric order
social_networks = sorted(social_networks.items())
# Make output dir where the site list will be written
os.mkdir("output")
# Write the list of supported sites to sites.mdx
with open("output/sites.mdx", "w", encoding=DEFAULT_ENCODING) as site_file:
site_file.write("---\n")
site_file.write("title: 'List of supported sites'\n")
site_file.write("sidebarTitle: 'Supported sites'\n")
site_file.write("icon: 'globe'\n")
site_file.write("description: 'Sherlock currently supports **400+** sites'\n")
site_file.write("---\n\n")
for social_network, info in social_networks:
url_main = info["urlMain"]
is_nsfw = "**(NSFW)**" if info.get("isNSFW") else ""
site_file.write(f"1. [{social_network}]({url_main}) {is_nsfw}\n")
# Overwrite the data.json file with sorted data
with open(DATA_REL_URI, "w", encoding=DEFAULT_ENCODING) as data_file:
sorted_data = json.dumps(data, indent=2, sort_keys=True)
data_file.write(sorted_data)
data_file.write("\n") # Keep the newline after writing data
print("Finished updating supported site listing!")

View File

@ -1,72 +0,0 @@
#!/usr/bin/env python
# This module summarizes the results of site validation tests queued by
# workflow validate_modified_targets for presentation in Issue comments.
from defusedxml import ElementTree as ET
import sys
from pathlib import Path
def summarize_junit_xml(xml_path: Path) -> str:
tree = ET.parse(xml_path)
root = tree.getroot()
suite = root.find('testsuite')
pass_message: str = ":heavy_check_mark: &nbsp; Pass"
fail_message: str = ":x: &nbsp; Fail"
if suite is None:
raise ValueError("Invalid JUnit XML: No testsuite found")
summary_lines: list[str] = []
summary_lines.append("#### Automatic validation of changes\n")
summary_lines.append("| Target | F+ Check | F- Check |")
summary_lines.append("|---|---|---|")
failures = int(suite.get('failures', 0))
errors_detected: bool = False
results: dict[str, dict[str, str]] = {}
for testcase in suite.findall('testcase'):
test_name = testcase.get('name').split('[')[0]
site_name = testcase.get('name').split('[')[1].rstrip(']')
failure = testcase.find('failure')
error = testcase.find('error')
if site_name not in results:
results[site_name] = {}
if test_name == "test_false_neg":
results[site_name]['F- Check'] = pass_message if failure is None and error is None else fail_message
elif test_name == "test_false_pos":
results[site_name]['F+ Check'] = pass_message if failure is None and error is None else fail_message
if error is not None:
errors_detected = True
for result in results:
summary_lines.append(f"| {result} | {results[result].get('F+ Check', 'Error!')} | {results[result].get('F- Check', 'Error!')} |")
if failures > 0:
summary_lines.append("\n___\n" +
"\nFailures were detected on at least one updated target. Commits containing accuracy failures" +
" will often not be merged (unless a rationale is provided, such as false negatives due to regional differences).")
if errors_detected:
summary_lines.append("\n___\n" +
"\n**Errors were detected during validation. Please review the workflow logs.**")
return "\n".join(summary_lines)
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: summarize_site_validation.py <junit-xml-file>")
sys.exit(1)
xml_path: Path = Path(sys.argv[1])
if not xml_path.is_file():
print(f"Error: File '{xml_path}' does not exist.")
sys.exit(1)
summary: str = summarize_junit_xml(xml_path)
print(summary)

docker-compose.yml Normal file
View File

@ -0,0 +1,7 @@
version: '2'
services:
sherlock:
build: .
volumes:
- "./results:/opt/sherlock/results"

View File

@ -1,143 +0,0 @@
<p align="center">
<br>
<a href="https://sherlock-project.github.io/" target="_blank"><img src="images/sherlock-logo.png" alt="sherlock"/></a>
<br>
<span>Hunt down social media accounts by username across <a href="https://sherlockproject.xyz/sites">400+ social networks</a></span>
<br>
</p>
<p align="center">
<a href="https://sherlockproject.xyz/installation">Installation</a>
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://sherlockproject.xyz/usage">Usage</a>
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://sherlockproject.xyz/contribute">Contributing</a>
</p>
<p align="center">
<img width="70%" height="70%" src="images/demo.png" alt="demo"/>
</p>
## Installation
> [!WARNING]
> Packages for ParrotOS and Ubuntu 24.04, maintained by a third party, appear to be __broken__.
> Users of these systems should defer to pipx/pip or Docker.
| Method | Notes |
| - | - |
| `pipx install sherlock-project` | `pip` may be used in place of `pipx` |
| `docker run -it --rm sherlock/sherlock` | |
| `dnf install sherlock-project` | |
Community-maintained packages are available for Debian (>= 13), Ubuntu (>= 22.10), Homebrew, Kali, and BlackArch. These packages are not directly supported or maintained by the Sherlock Project.
See all alternative installation methods [here](https://sherlockproject.xyz/installation)
## General usage
To search for only one user:
```bash
sherlock user123
```
To search for more than one user:
```bash
sherlock user1 user2 user3
```
Accounts found will be stored in an individual text file with the corresponding username (e.g. ```user123.txt```).
```console
$ sherlock --help
usage: sherlock [-h] [--version] [--verbose] [--folderoutput FOLDEROUTPUT]
[--output OUTPUT] [--tor] [--unique-tor] [--csv] [--xlsx]
[--site SITE_NAME] [--proxy PROXY_URL] [--json JSON_FILE]
[--timeout TIMEOUT] [--print-all] [--print-found] [--no-color]
[--browse] [--local] [--nsfw]
USERNAMES [USERNAMES ...]
Sherlock: Find Usernames Across Social Networks (Version 0.14.3)
positional arguments:
USERNAMES One or more usernames to check with social networks.
Check similar usernames using {?} (replace to '_', '-', '.').
optional arguments:
-h, --help show this help message and exit
--version Display version information and dependencies.
--verbose, -v, -d, --debug
Display extra debugging information and metrics.
--folderoutput FOLDEROUTPUT, -fo FOLDEROUTPUT
If using multiple usernames, the output of the results will be
saved to this folder.
--output OUTPUT, -o OUTPUT
If using single username, the output of the result will be saved
to this file.
--tor, -t Make requests over Tor; increases runtime; requires Tor to be
installed and in system path.
--unique-tor, -u Make requests over Tor with new Tor circuit after each request;
increases runtime; requires Tor to be installed and in system
path.
--csv Create Comma-Separated Values (CSV) File.
--xlsx Create the standard file for the modern Microsoft Excel
spreadsheet (xlsx).
--site SITE_NAME Limit analysis to just the listed sites. Add multiple options to
specify more than one site.
--proxy PROXY_URL, -p PROXY_URL
Make requests over a proxy. e.g. socks5://127.0.0.1:1080
--json JSON_FILE, -j JSON_FILE
Load data from a JSON file or an online, valid, JSON file.
--timeout TIMEOUT Time (in seconds) to wait for response to requests (Default: 60)
--print-all Output sites where the username was not found.
--print-found Output sites where the username was found.
--no-color Don't color terminal output
--browse, -b Browse to all results on default browser.
--local, -l Force the use of the local data.json file.
--nsfw Include checking of NSFW sites from default list.
```
## Apify Actor Usage [![Sherlock Actor](https://apify.com/actor-badge?actor=netmilk/sherlock)](https://apify.com/netmilk/sherlock?fpr=sherlock)
<a href="https://apify.com/netmilk/sherlock?fpr=sherlock"><img src="https://apify.com/ext/run-on-apify.png" alt="Run Sherlock Actor on Apify" width="176" height="39" /></a>
You can run Sherlock in the cloud without installation using the [Sherlock Actor](https://apify.com/netmilk/sherlock?fpr=sherlock) on [Apify](https://apify.com?fpr=sherlock) free of charge.
``` bash
$ echo '{"usernames":["user123"]}' | apify call -so netmilk/sherlock
[{
"username": "user123",
"links": [
"https://www.1337x.to/user/user123/",
...
]
}]
```
Read more about the [Sherlock Actor](../.actor/README.md), including how to use it programmatically via the Apify [API](https://apify.com/netmilk/sherlock/api?fpr=sherlock), [CLI](https://docs.apify.com/cli/?fpr=sherlock) and [JS/TS and Python SDKs](https://docs.apify.com/sdk?fpr=sherlock).
## Credits
Thank you to everyone who has contributed to Sherlock! ❤️
<a href="https://github.com/sherlock-project/sherlock/graphs/contributors">
<img src="https://contrib.rocks/image?&columns=25&max=10000&&repo=sherlock-project/sherlock" alt="contributors"/>
</a>
## Star History
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=sherlock-project/sherlock&type=Date&theme=dark" />
<source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=sherlock-project/sherlock&type=Date" />
<img alt="Sherlock Project Star History Chart" src="https://api.star-history.com/svg?repos=sherlock-project/sherlock&type=Date" />
</picture>
## License
MIT © Sherlock Project<br/>
Original Creator - [Siddharth Dushantha](https://github.com/sdushantha)
<!-- Reference Links -->
[ext_pypi]: https://pypi.org/project/sherlock-project/
[ext_brew]: https://formulae.brew.sh/formula/sherlock

Binary file not shown. (removed image, 440 KiB)

Binary file not shown. (removed image, 70 KiB)

View File

@ -1,42 +0,0 @@
<!-- This README should be a mini version at all times for use on pypi -->
<p align=center>
<br>
<a href="https://sherlock-project.github.io/" target="_blank"><img src="https://www.kali.org/tools/sherlock/images/sherlock-logo.svg" width="25%"/></a>
<br>
<strong><span>Hunt down social media accounts by username across <a href="https://github.com/sherlock-project/sherlock/blob/master/sites.md">400+ social networks</a></span></strong>
<br><br>
<span>Additional documentation can be found at our <a href="https://github.com/sherlock-project/sherlock/">GitHub repository</a></span>
<br>
</p>
## Usage
```console
$ sherlock --help
usage: sherlock [-h] [--version] [--verbose] [--folderoutput FOLDEROUTPUT]
[--output OUTPUT] [--tor] [--unique-tor] [--csv] [--xlsx]
[--site SITE_NAME] [--proxy PROXY_URL] [--json JSON_FILE]
[--timeout TIMEOUT] [--print-all] [--print-found] [--no-color]
[--browse] [--local] [--nsfw]
USERNAMES [USERNAMES ...]
```
To search for only one user:
```bash
$ sherlock user123
```
To search for more than one user:
```bash
$ sherlock user1 user2 user3
```
<br>
___
<br>
<p align="center">
<img width="70%" height="70%" src="https://user-images.githubusercontent.com/27065646/219638267-a5e11090-aa6e-4e77-87f7-0e95f6ad5978.png"/>
</a>
</p>

images/preview.png Normal file

Binary file not shown. (new image, 139 KiB)

View File

@ -1,68 +0,0 @@
[build-system]
requires = [ "poetry-core>=1.2.0" ]
build-backend = "poetry.core.masonry.api"
# poetry-core 1.8 not available in .fc39. Can upgrade to 1.8.0 at .fc39 EOL
[tool.poetry-version-plugin]
source = "init"
[tool.poetry]
name = "sherlock-project"
version = "0.16.0"
description = "Hunt down social media accounts by username across social networks"
license = "MIT"
authors = [
"Siddharth Dushantha <siddharth.dushantha@gmail.com>"
]
maintainers = [
"Paul Pfeister <code@pfeister.dev>",
"Matheus Felipe <matheusfelipeog@protonmail.com>",
"Sondre Karlsen Dyrnes <sondre@villdyr.no>"
]
readme = "docs/pyproject/README.md"
packages = [ { include = "sherlock_project"} ]
keywords = [ "osint", "reconnaissance", "information gathering" ]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Topic :: Security"
]
homepage = "https://sherlockproject.xyz/"
repository = "https://github.com/sherlock-project/sherlock"
[tool.poetry.urls]
"Bug Tracker" = "https://github.com/sherlock-project/sherlock/issues"
[tool.poetry.dependencies]
python = "^3.9"
certifi = ">=2019.6.16"
colorama = "^0.4.1"
PySocks = "^1.7.0"
requests = "^2.22.0"
requests-futures = "^1.0.0"
stem = "^1.8.0"
pandas = "^2.2.1"
openpyxl = "^3.0.10"
tomli = "^2.2.1"
[tool.poetry.group.dev.dependencies]
jsonschema = "^4.0.0"
rstr = "^3.2.2"
pytest = "^8.4.2"
pytest-xdist = "^3.8.0"
[tool.poetry.group.ci.dependencies]
defusedxml = "^0.7.1"
[tool.poetry.scripts]
sherlock = 'sherlock_project.sherlock:main'

View File

@ -1,7 +0,0 @@
[pytest]
addopts = --strict-markers -m "not validate_targets"
markers =
online: mark tests as requiring internet access.
validate_targets: mark tests for sweeping manifest validation (sends many requests).
validate_targets_fp: validate_targets, false positive tests only.
validate_targets_fn: validate_targets, false negative tests only.
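
A quick sketch of how these markers combine with the `addopts` line above (the second command mirrors the invocation used by the Exclusions Updater workflow earlier in this compare; `-n` is provided by `pytest-xdist`):

```bash
# Default run: addopts deselects the sweeping validate_targets tests.
pytest

# Opt in to the false-positive validation sweep, parallelised across 20 workers,
# as the Exclusions Updater workflow does:
pytest -q --tb no -m validate_targets_fp -n 20
```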

removed_sites.json Normal file
View File

@ -0,0 +1,860 @@
{
"AdobeForums": {
"errorType": "status_code",
"url": "https://forums.adobe.com/people/{}",
"urlMain": "https://forums.adobe.com/",
"username_claimed": "jack"
},
"AngelList": {
"errorType": "status_code",
"url": "https://angel.co/u/{}",
"urlMain": "https://angel.co/",
"username_claimed": "blue"
},
"Basecamp": {
"errorMsg": "The account you were looking for doesn't exist",
"errorType": "message",
"url": "https://{}.basecamphq.com",
"urlMain": "https://basecamp.com/",
"username_claimed": "blue"
},
"BlackPlanet": {
"errorMsg": "My Hits",
"errorType": "message",
"url": "http://blackplanet.com/{}",
"urlMain": "http://blackplanet.com/"
},
"Canva": {
"errorType": "response_url",
"errorUrl": "https://www.canva.com/{}",
"url": "https://www.canva.com/{}",
"urlMain": "https://www.canva.com/",
"username_claimed": "jenny"
},
"Codementor": {
"errorType": "status_code",
"url": "https://www.codementor.io/@{}",
"urlMain": "https://www.codementor.io/",
"username_claimed": "blue"
},
"EVE Online": {
"errorType": "response_url",
"errorUrl": "https://eveonline.com",
"url": "https://evewho.com/pilot/{}/",
"urlMain": "https://eveonline.com",
"username_claimed": "blue"
},
"fanpop": {
"errorType": "response_url",
"errorUrl": "http://www.fanpop.com/",
"url": "http://www.fanpop.com/fans/{}",
"urlMain": "http://www.fanpop.com/",
"username_claimed": "blue"
},
"Fotolog": {
"errorType": "status_code",
"url": "https://fotolog.com/{}",
"urlMain": "https://fotolog.com/"
},
"Foursquare": {
"errorType": "status_code",
"url": "https://foursquare.com/{}",
"urlMain": "https://foursquare.com/",
"username_claimed": "dens"
},
"gpodder.net": {
"errorType": "status_code",
"url": "https://gpodder.net/user/{}",
"urlMain": "https://gpodder.net/",
"username_claimed": "blue"
},
"Investing.com": {
"errorType": "status_code",
"url": "https://www.investing.com/traders/{}",
"urlMain": "https://www.investing.com/",
"username_claimed": "jenny"
},
"Khan Academy": {
"errorType": "status_code",
"url": "https://www.khanacademy.org/profile/{}",
"urlMain": "https://www.khanacademy.org/",
"username_claimed": "blue"
},
"KiwiFarms": {
"errorMsg": "The specified member cannot be found",
"errorType": "message",
"url": "https://kiwifarms.net/members/?username={}",
"urlMain": "https://kiwifarms.net/",
"username_claimed": "blue"
},
"NPM-Package": {
"errorType": "status_code",
"url": "https://www.npmjs.com/package/{}",
"urlMain": "https://www.npmjs.com/",
"username_claimed": "blue"
},
"Pexels": {
"errorType": "status_code",
"url": "https://www.pexels.com/@{}",
"urlMain": "https://www.pexels.com/",
"username_claimed": "bruno"
},
"Pixabay": {
"errorType": "status_code",
"url": "https://pixabay.com/en/users/{}",
"urlMain": "https://pixabay.com/",
"username_claimed": "blue"
},
"PowerShell Gallery": {
"errorType": "status_code",
"url": "https://www.powershellgallery.com/profiles/{}",
"urlMain": "https://www.powershellgallery.com",
"username_claimed": "powershellteam"
},
"RamblerDating": {
"errorType": "response_url",
"errorUrl": "https://dating.rambler.ru/page/{}",
"url": "https://dating.rambler.ru/page/{}",
"urlMain": "https://dating.rambler.ru/",
"username_claimed": "blue"
},
"Shockwave": {
"errorMsg": "Oh no! You just finished all of the games on the internet!",
"errorType": "message",
"url": "http://www.shockwave.com/member/profiles/{}.jsp",
"urlMain": "http://www.shockwave.com/",
"username_claimed": "blue"
},
"StreamMe": {
"errorType": "status_code",
"url": "https://www.stream.me/{}",
"urlMain": "https://www.stream.me/",
"username_claimed": "blue"
},
"Teknik": {
"errorMsg": "The user does not exist",
"errorType": "message",
"url": "https://user.teknik.io/{}",
"urlMain": "https://teknik.io/",
"username_claimed": "red"
},
"YandexMarket": {
"errorMsg": "\u0422\u0443\u0442 \u043d\u0438\u0447\u0435\u0433\u043e \u043d\u0435\u0442",
"errorType": "message",
"url": "https://market.yandex.ru/user/{}/achievements",
"urlMain": "https://market.yandex.ru/",
"username_claimed": "blue"
},
"Insanejournal": {
"errorMsg": "Unknown user",
"errorType": "message",
"url": "http://{}.insanejournal.com/profile",
"urlMain": "insanejournal.com",
"username_claimed": "blue"
},
"Trip": {
"errorType": "status_code",
"url": "https://www.trip.skyscanner.com/user/{}",
"urlMain": "https://www.trip.skyscanner.com/",
"username_claimed": "blue"
},
"SportsTracker": {
"errorUrl": "https://www.sports-tracker.com/page-not-found",
"errorType": "response_url",
"url": "https://www.sports-tracker.com/view_profile/{}",
"urlMain": "https://www.sports-tracker.com/",
"username_claimed": "blue"
},
"boingboing.net": {
"errorType": "status_code",
"url": "https://bbs.boingboing.net/u/{}",
"urlMain": "https://boingboing.net/",
"username_claimed": "admin"
},
"elwoRU": {
"errorMsg": "\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c \u043d\u0435 \u043d\u0430\u0439\u0434\u0435\u043d",
"errorType": "message",
"url": "https://elwo.ru/index/8-0-{}",
"urlMain": "https://elwo.ru/",
"username_claimed": "red"
},
"ingvarr.net.ru": {
"errorMsg": "\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c \u043d\u0435 \u043d\u0430\u0439\u0434\u0435\u043d",
"errorType": "message",
"url": "http://ingvarr.net.ru/index/8-0-{}",
"urlMain": "http://ingvarr.net.ru/",
"username_claimed": "red"
},
"Redsun.tf": {
"errorMsg": "The specified member cannot be found",
"errorType": "message",
"url": "https://forum.redsun.tf/members/?username={}",
"urlMain": "https://redsun.tf/",
"username_claimed": "dan"
},
"CreativeMarket": {
"errorType": "status_code",
"url": "https://creativemarket.com/users/{}",
"urlMain": "https://creativemarket.com/",
"username_claimed": "blue"
},
"pvpru": {
"errorType": "status_code",
"url": "https://pvpru.com/board/member.php?username={}&tab=aboutme#aboutme",
"urlMain": "https://pvpru.com/",
"username_claimed": "blue"
},
"easyen": {
"errorMsg": "\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c \u043d\u0435 \u043d\u0430\u0439\u0434\u0435\u043d",
"errorType": "message",
"url": "https://easyen.ru/index/8-0-{}",
"urlMain": "https://easyen.ru/",
"username_claimed": "wd"
},
"pedsovet": {
"errorMsg": "\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c \u043d\u0435 \u043d\u0430\u0439\u0434\u0435\u043d",
"errorType": "message",
"url": "http://pedsovet.su/index/8-0-{}",
"urlMain": "http://pedsovet.su/",
"username_claimed": "blue"
},
"radioskot": {
"errorMsg": "\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c \u043d\u0435 \u043d\u0430\u0439\u0434\u0435\u043d",
"errorType": "message",
"url": "https://radioskot.ru/index/8-0-{}",
"urlMain": "https://radioskot.ru/",
"username_claimed": "red"
},
"Coderwall": {
"errorMsg": "404! Our feels when that url is used",
"errorType": "message",
"url": "https://coderwall.com/{}",
"urlMain": "https://coderwall.com/",
"username_claimed": "jenny"
},
"TamTam": {
"errorType": "response_url",
"errorUrl": "https://tamtam.chat/",
"url": "https://tamtam.chat/{}",
"urlMain": "https://tamtam.chat/",
"username_claimed": "blue"
},
"Zomato": {
"errorType": "status_code",
"headers": {
"Accept-Language": "en-US,en;q=0.9"
},
"url": "https://www.zomato.com/pl/{}/foodjourney",
"urlMain": "https://www.zomato.com/",
"username_claimed": "deepigoyal"
},
"mixer.com": {
"errorType": "status_code",
"url": "https://mixer.com/{}",
"urlMain": "https://mixer.com/",
"urlProbe": "https://mixer.com/api/v1/channels/{}",
"username_claimed": "blue"
},
"KanoWorld": {
"errorType": "status_code",
"url": "https://api.kano.me/progress/user/{}",
"urlMain": "https://world.kano.me/",
"username_claimed": "blue"
},
"YandexCollection": {
"errorType": "status_code",
"url": "https://yandex.ru/collections/user/{}/",
"urlMain": "https://yandex.ru/collections/",
"username_claimed": "blue"
},
"PayPal": {
"errorMsg": "<meta name=\"twitter:title\" content=\"Get your very own PayPal.Me link\" />",
"errorType": "message",
"url": "https://www.paypal.com/paypalme/{}",
"headers": {
"User-Agent": ""
},
"urlMain": "https://www.paypal.me/",
"username_claimed": "blue"
},
"ImageShack": {
"errorType": "response_url",
"errorUrl": "https://imageshack.us/",
"url": "https://imageshack.us/user/{}",
"urlMain": "https://imageshack.us/",
"username_claimed": "blue"
},
"Aptoide": {
"errorType": "status_code",
"url": "https://{}.en.aptoide.com/",
"urlMain": "https://en.aptoide.com/",
"username_claimed": "blue"
},
"Crunchyroll": {
"errorType": "status_code",
"url": "https://www.crunchyroll.com/user/{}",
"urlMain": "https://www.crunchyroll.com/",
"username_claimed": "blue"
},
"T-MobileSupport": {
"errorType": "status_code",
"url": "https://support.t-mobile.com/people/{}",
"urlMain": "https://support.t-mobile.com",
"username_claimed": "blue"
},
"OpenCollective": {
"errorType": "status_code",
"url": "https://opencollective.com/{}",
"urlMain": "https://opencollective.com/",
"username_claimed": "sindresorhus"
},
"SegmentFault": {
"errorType": "status_code",
"url": "https://segmentfault.com/u/{}",
"urlMain": "https://segmentfault.com/",
"username_claimed": "bule"
},
"Viadeo": {
"errorType": "status_code",
"url": "http://fr.viadeo.com/en/profile/{}",
"urlMain": "http://fr.viadeo.com/en/",
"username_claimed": "franck.patissier"
},
"MeetMe": {
"errorType": "response_url",
"errorUrl": "https://www.meetme.com/",
"url": "https://www.meetme.com/{}",
"urlMain": "https://www.meetme.com/",
"username_claimed": "blue"
},
"tracr.co": {
"errorMsg": "No search results",
"errorType": "message",
"regexCheck": "^[A-Za-z0-9]{2,32}$",
"url": "https://tracr.co/users/1/{}",
"urlMain": "https://tracr.co/",
"username_claimed": "blue"
},
"Taringa": {
"errorType": "status_code",
"regexCheck": "^[^.]*$",
"url": "https://www.taringa.net/{}",
"urlMain": "https://taringa.net/",
"username_claimed": "blue"
},
"Photobucket": {
"errorType": "status_code",
"url": "https://photobucket.com/user/{}/library",
"urlMain": "https://photobucket.com/",
"username_claimed": "blue"
},
"4pda": {
"errorMsg": "[1,false,0]",
"errorType": "message",
"url": "https://4pda.ru/forum/index.php?act=search&source=pst&noform=1&username={}",
"urlMain": "https://4pda.ru/",
"urlProbe": " https://4pda.ru/forum/index.php?act=auth&action=chkname&login={}",
"username_claimed": "green"
},
"PokerStrategy": {
"errorType": "status_code",
"url": "http://www.pokerstrategy.net/user/{}/profile/",
"urlMain": "http://www.pokerstrategy.net",
"username_claimed": "blue"
},
"Filmogs": {
"errorType": "status_code",
"url": "https://www.filmo.gs/users/{}",
"urlMain": "https://www.filmo.gs/",
"username_claimed": "cupparober"
},
"500px": {
"errorMsg": "No message available",
"errorType": "message",
"url": "https://500px.com/p/{}",
"urlMain": "https://500px.com/",
"urlProbe": "https://api.500px.com/graphql?operationName=ProfileRendererQuery&variables=%7B%22username%22%3A%22{}%22%7D&extensions=%7B%22persistedQuery%22%3A%7B%22version%22%3A1%2C%22sha256Hash%22%3A%224d02ff5c13927a3ac73b3eef306490508bc765956940c31051468cf30402a503%22%7D%7D",
"username_claimed": "blue"
},
"Badoo": {
"errorType": "status_code",
"url": "https://badoo.com/profile/{}",
"urlMain": "https://badoo.com/",
"username_claimed": "blue"
},
"Pling": {
"errorMsg": "Resource not found",
"errorType": "message",
"url": "https://www.pling.com/u/{}/",
"urlMain": "https://www.pling.com/",
"username_claimed": "blue"
},
"Realmeye": {
"errorMsg": "Sorry, but we either:",
"errorType": "message",
"url": "https://www.realmeye.com/player/{}",
"urlMain": "https://www.realmeye.com/",
"username_claimed": "blue"
},
"Travellerspoint": {
"errorMsg": "Wooops. Sorry!",
"errorType": "message",
"url": "https://www.travellerspoint.com/users/{}",
"urlMain": "https://www.travellerspoint.com",
"username_claimed": "blue"
},
"GDProfiles": {
"errorType": "status_code",
"url": "https://gdprofiles.com/{}",
"urlMain": "https://gdprofiles.com/",
"username_claimed": "blue"
},
"AllTrails": {
"errorMsg": "class=\"home index\"",
"errorType": "message",
"url": "https://www.alltrails.com/members/{}",
"urlMain": "https://www.alltrails.com/",
"username_claimed": "blue"
},
"Cent": {
"errorMsg": "<title>Cent</title>",
"errorType": "message",
"url": "https://beta.cent.co/@{}",
"urlMain": "https://cent.co/",
"username_claimed": "blue"
},
"Anobii": {
"errorType": "response_url",
"url": "https://www.anobii.com/{}/profile",
"urlMain": "https://www.anobii.com/",
"username_claimed": "blue"
},
"Kali community": {
"errorMsg": "This user has not registered and therefore does not have a profile to view.",
"errorType": "message",
"url": "https://forums.kali.org/member.php?username={}",
"urlMain": "https://forums.kali.org/",
"username_claimed": "blue"
},
"NameMC (Minecraft.net skins)": {
"errorMsg": "Profiles: 0 results",
"errorType": "message",
"url": "https://namemc.com/profile/{}",
"urlMain": "https://namemc.com/",
"username_claimed": "blue"
},
"Steamid": {
"errorMsg": "<link rel=\"canonical\" href=\"https://steamid.uk\" />",
"errorType": "message",
"url": "https://steamid.uk/profile/{}",
"urlMain": "https://steamid.uk/",
"username_claimed": "blue"
},
"TripAdvisor": {
"errorMsg": "This page is on vacation\u2026",
"errorType": "message",
"url": "https://tripadvisor.com/members/{}",
"urlMain": "https://tripadvisor.com/",
"username_claimed": "blue"
},
"House-Mixes.com": {
"errorMsg": "Profile Not Found",
"errorType": "message",
"regexCheck": "^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$",
"url": "https://www.house-mixes.com/profile/{}",
"urlMain": "https://www.house-mixes.com/",
"username_claimed": "blue"
},
"Quora": {
"errorMsg": "Page Not Found",
"errorType": "message",
"url": "https://www.quora.com/profile/{}",
"urlMain": "https://www.quora.com/",
"username_claimed": "Matt-Riggsby"
},
"SparkPeople": {
"errorMsg": "We couldn't find that user",
"errorType": "message",
"url": "https://www.sparkpeople.com/mypage.asp?id={}",
"urlMain": "https://www.sparkpeople.com",
"username_claimed": "adam"
},
"Cloob": {
"errorType": "status_code",
"url": "https://www.cloob.com/name/{}",
"urlMain": "https://www.cloob.com/",
"username_claimed": "blue"
},
"TM-Ladder": {
"errorMsg": "player unknown or invalid",
"errorType": "message",
"url": "http://en.tm-ladder.com/{}_rech.php",
"urlMain": "http://en.tm-ladder.com/index.php",
"username_claimed": "blue"
},
"plug.dj": {
"errorType": "status_code",
"url": "https://plug.dj/@/{}",
"urlMain": "https://plug.dj/",
"username_claimed": "plug-dj-rock"
},
"Facenama": {
"errorType": "response_url",
"errorUrl": "https://facenama.com/404.html",
"regexCheck": "^[-a-zA-Z0-9_]+$",
"url": "https://facenama.com/{}",
"urlMain": "https://facenama.com/",
"username_claimed": "blue"
},
"Designspiration": {
"errorType": "status_code",
"url": "https://www.designspiration.net/{}/",
"urlMain": "https://www.designspiration.net/",
"username_claimed": "blue"
},
"CapFriendly": {
"errorMsg": "<div class=\"err show p5\">No results found</div>",
"errorType": "message",
"regexCheck": "^[a-zA-z][a-zA-Z0-9_]{2,79}$",
"url": "https://www.capfriendly.com/users/{}",
"urlMain": "https://www.capfriendly.com/",
"username_claimed": "thisactuallyexists"
},
"Gab": {
"errorMsg": "The page you are looking for isn't here.",
"errorType": "message",
"url": "https://gab.com/{}",
"urlMain": "https://gab.com",
"username_claimed": "a"
},
"FanCentro": {
"errorMsg": "var environment",
"errorType": "message",
"url": "https://fancentro.com/{}",
"urlMain": "https://fancentro.com/",
"username_claimed": "nielsrosanna"
},
"Codeforces": {
"errorType": "response_url",
"errorUrl": "https://codeforces.com/",
"url": "https://codeforces.com/profile/{}",
"urlMain": "https://www.codeforces.com/",
"username_claimed": "tourist"
},
"Smashcast": {
"errorType": "status_code",
"url": "https://www.smashcast.tv/api/media/live/{}",
"urlMain": "https://www.smashcast.tv/",
"username_claimed": "hello"
},
"Countable": {
"errorType": "status_code",
"url": "https://www.countable.us/{}",
"urlMain": "https://www.countable.us/",
"username_claimed": "blue"
},
"Spotify": {
"errorType": "status_code",
"url": "https://open.spotify.com/user/{}",
"urlMain": "https://open.spotify.com/",
"username_claimed": "blue"
},
"Raidforums": {
"errorType": "status_code",
"url": "https://raidforums.com/User-{}",
"urlMain": "https://raidforums.com/",
"username_claimed": "red"
},
"Pinterest": {
"errorType": "status_code",
"url": "https://www.pinterest.com/{}/",
"urlMain": "https://www.pinterest.com/",
"username_claimed": "blue"
},
"PCPartPicker": {
"errorType": "status_code",
"url": "https://pcpartpicker.com/user/{}",
"urlMain": "https://pcpartpicker.com",
"username_claimed": "blue"
},
"eBay.com": {
"errorMsg": "The User ID you entered was not found. Please check the User ID and try again.",
"errorType": "message",
"url": "https://www.ebay.com/usr/{}",
"urlMain": "https://www.ebay.com/",
"username_claimed": "blue"
},
"eBay.de": {
"errorMsg": "Der eingegebene Nutzername wurde nicht gefunden. Bitte pr\u00fcfen Sie den Nutzernamen und versuchen Sie es erneut.",
"errorType": "message",
"url": "https://www.ebay.de/usr/{}",
"urlMain": "https://www.ebay.de/",
"username_claimed": "blue"
},
"Ghost": {
"errorMsg": "Domain Error",
"errorType": "message",
"url": "https://{}.ghost.io/",
"urlMain": "https://ghost.org/",
"username_claimed": "troyhunt"
},
"Atom Discussions": {
"errorMsg": "Oops! That page doesn\u2019t exist or is private.",
"errorType": "message",
"url": "https://discuss.atom.io/u/{}/summary",
"urlMain": "https://discuss.atom.io",
"username_claimed": "blue"
},
"Gam1ng": {
"errorType": "status_code",
"url": "https://gam1ng.com.br/user/{}",
"urlMain": "https://gam1ng.com.br",
"username_claimed": "PinKgirl"
},
"OGUsers": {
"errorType": "status_code",
"url": "https://ogusers.com/{}",
"urlMain": "https://ogusers.com/",
"username_claimed": "ogusers"
},
"Otzovik": {
"errorType": "status_code",
"url": "https://otzovik.com/profile/{}",
"urlMain": "https://otzovik.com/",
"username_claimed": "blue"
},
"radio_echo_msk": {
"errorType": "status_code",
"url": "https://echo.msk.ru/users/{}",
"urlMain": "https://echo.msk.ru/",
"username_claimed": "blue"
},
"Ello": {
"errorMsg": "We couldn't find the page you're looking for",
"errorType": "message",
"url": "https://ello.co/{}",
"urlMain": "https://ello.co/",
"username_claimed": "blue"
},
"GitHub Support Community": {
"errorMsg": "Oops! That page doesn\u2019t exist or is private.",
"errorType": "message",
"url": "https://github.community/u/{}/summary",
"urlMain": "https://github.community",
"username_claimed": "jperl"
},
"GuruShots": {
"errorType": "status_code",
"url": "https://gurushots.com/{}/photos",
"urlMain": "https://gurushots.com/",
"username_claimed": "blue"
},
"Google Developer": {
"errorMsg": "Sorry, the profile was not found.",
"errorType": "message",
"url": "https://g.dev/{}",
"urlMain": "https://g.dev/",
"username_claimed": "blue"
},
"mastodon.technology": {
"errorType": "status_code",
"url": "https://mastodon.technology/@{}",
"urlMain": "https://mastodon.xyz/",
"username_claimed": "ashfurrow"
},
"zoomit": {
"errorMsg": "\u0645\u062a\u0627\u0633\u0641\u0627\u0646\u0647 \u0635\u0641\u062d\u0647 \u06cc\u0627\u0641\u062a \u0646\u0634\u062f",
"errorType": "message",
"url": "https://www.zoomit.ir/user/{}",
"urlMain": "https://www.zoomit.ir",
"username_claimed": "kossher"
},
"Facebook": {
"errorType": "status_code",
"regexCheck": "^[a-zA-Z0-9\\.]{3,49}(?<!\\.com|\\.org|\\.net)$",
"url": "https://www.facebook.com/{}",
"urlMain": "https://www.facebook.com/",
"urlProbe": "https://www.facebook.com/{}/videos/",
"username_claimed": "hackerman"
},
"BinarySearch": {
"errorMsg": "{}",
"errorType": "message",
"regexCheck": "^[a-zA-Z0-9-_]{1,15}$",
"url": "https://binarysearch.io/@/{}",
"urlMain": "https://binarysearch.io/",
"urlProbe": "https://binarysearch.io/api/users/{}/profile",
"username_claimed": "Eyes_Wide_Shut"
},
"Arduino": {
"errorType": "status_code",
"regexCheck": "^(?![_-])[A-Za-z0-9_-]{3,}$",
"url": "https://create.arduino.cc/projecthub/{}",
"urlMain": "https://www.arduino.cc/",
"username_claimed": "blue"
},
"koo": {
"errorMsg": "This profile does not exist",
"errorType": "message",
"url": "https://www.kooapp.com/profile/{}",
"urlMain": "https://www.kooapp.com",
"urlProbe": "https://www.kooapp.com/apiV1/users/handle/{}/valid",
"username_claimed": "john"
},
"We Heart It": {
"errorMsg": "Oops! You've landed on a moving target!",
"errorType": "message",
"url": "https://weheartit.com/{}",
"urlMain": "https://weheartit.com/",
"username_claimed": "ventivogue"
},
"Tinder": {
"errorMsg": [
"<title data-react-helmet=\"true\">Tinder | Dating, Make Friends &amp; Meet New People</title>",
"<title data-react-helmet=\"true\">Tinder | Match. Chat. Date.</title>"
],
"errorType": "message",
"url": "https://www.tinder.com/@{}",
"urlMain": "https://tinder.com/",
"username_claimed": "blue"
},
"Coil": {
"errorMsg": "User not found",
"errorType": "message",
"request_method": "POST",
"request_payload": {
"operationName": "getCreator",
"query": "query getCreator($userShortName:String!){getCreator(userShortName:$userShortName){id}}",
"variables": {
"userShortName": "{}"
}
},
"url": "https://coil.com/u/{}",
"urlMain": "https://coil.com/",
"urlProbe": "https://coil.com/gateway",
"username_claimed": "adam"
},
"OnlyFans": {
"errorType": "status_code",
"isNSFW": true,
"url": "https://onlyfans.com/{}",
"urlMain": "https://onlyfans.com/",
"urlProbe": "https://onlyfans.com/api2/v2/users/{}",
"username_claimed": "theemilylynne"
},
"OK": {
"errorType": "status_code",
"regexCheck": "^[a-zA-Z][a-zA-Z0-9_.-]*$",
"url": "https://ok.ru/{}",
"urlMain": "https://ok.ru/",
"username_claimed": "ok"
},
"forumhouseRU": {
"errorMsg": "\u0423\u043a\u0430\u0437\u0430\u043d\u043d\u044b\u0439 \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c \u043d\u0435 \u043d\u0430\u0439\u0434\u0435\u043d. \u041f\u043e\u0436\u0430\u043b\u0443\u0439\u0441\u0442\u0430, \u0432\u0432\u0435\u0434\u0438\u0442\u0435 \u0434\u0440\u0443\u0433\u043e\u0435 \u0438\u043c\u044f.",
"errorType": "message",
"url": "https://www.forumhouse.ru/members/?username={}",
"urlMain": "https://www.forumhouse.ru/",
"username_claimed": "red"
},
"Enjin": {
"errorMsg": "Yikes, there seems to have been an error. We've taken note and will check out the problem right away!",
"errorType": "message",
"url": "https://www.enjin.com/profile/{}",
"urlMain": "https://www.enjin.com/",
"username_claimed": "blue"
},
"IRL": {
"errorType": "status_code",
"url": "https://www.irl.com/{}",
"urlMain": "https://www.irl.com/",
"username_claimed": "hacker"
},
"Munzee": {
"errorType": "status_code",
"url": "https://www.munzee.com/m/{}",
"urlMain": "https://www.munzee.com/",
"username_claimed": "blue"
},
"Quizlet": {
"errorMsg": "Page Unavailable",
"errorType": "message",
"url": "https://quizlet.com/{}",
"urlMain": "https://quizlet.com",
"username_claimed": "blue"
},
"GunsAndAmmo": {
"errorType": "status_code",
"url": "https://forums.gunsandammo.com/profile/{}",
"urlMain": "https://gunsandammo.com/",
"username_claimed": "adam"
},
"TikTok": {
"errorType": "status_code",
"url": "https://tiktok.com/@{}",
"urlMain": "https://tiktok.com/",
"username_claimed": "red"
},
"Lolchess": {
"errorMsg": "No search results",
"errorType": "message",
"url": "https://lolchess.gg/profile/na/{}",
"urlMain": "https://lolchess.gg/",
"username_claimed": "blue"
},
"Virgool": {
"errorMsg": "\u06f4\u06f0\u06f4",
"errorType": "message",
"url": "https://virgool.io/@{}",
"urlMain": "https://virgool.io/",
"username_claimed": "blue"
},
"Whonix Forum": {
"errorType": "status_code",
"url": "https://forums.whonix.org/u/{}/summary",
"urlMain": "https://forums.whonix.org/",
"username_claimed": "red"
},
"ebio.gg": {
"errorType": "status_code",
"url": "https://ebio.gg/{}",
"urlMain": "https:/ebio.gg",
"username_claimed": "dev"
},
"metacritic": {
"errorMsg": "User not found",
"errorType": "message",
"regexCheck": "^(?![-_].)[A-Za-z0-9-_]{3,15}$",
"url": "https://www.metacritic.com/user/{}",
"urlMain": "https://www.metacritic.com/",
"username_claimed": "blue"
},
"Oracle Communities": {
"errorType": "status_code",
"url": "https://community.oracle.com/people/{}",
"urlMain": "https://community.oracle.com",
"username_claimed": "dev"
},
"HexRPG": {
"errorMsg": "Error : User ",
"errorType": "message",
"regexCheck": "^[a-zA-Z0-9_ ]{3,20}$",
"url": "https://www.hexrpg.com/userinfo/{}",
"urlMain": "https://www.hexrpg.com/",
"username_claimed": "blue"
},
"G2G": {
"errorType": "response_url",
"errorUrl": "https://www.g2g.com/{}",
"regexCheck": "^[A-Za-z][A-Za-z0-9_]{2,11}$",
"url": "https://www.g2g.com/{}",
"urlMain": "https://www.g2g.com/",
"username_claimed": "user"
},
"BitCoinForum": {
"errorMsg": "The user whose profile you are trying to view does not exist.",
"errorType": "message",
"url": "https://bitcoinforum.com/profile/{}",
"urlMain": "https://bitcoinforum.com",
"username_claimed": "bitcoinforum.com"
}
}

View File

@ -84,6 +84,22 @@ As of 2020-02-23, all usernames are reported as not existing.
},
```
## Fanpop
As of 2020-02-23, all usernames are reported as not existing.
```json
"fanpop": {
"errorType": "response_url",
"errorUrl": "http://www.fanpop.com/",
"rank": 9454,
"url": "http://www.fanpop.com/fans/{}",
"urlMain": "http://www.fanpop.com/",
"username_claimed": "blue",
"username_unclaimed": "noonewould_everusethis7"
},
```
## Canva
As of 2020-02-23, all usernames are reported as not existing.
@ -602,7 +618,7 @@ removed
## Coderwall
As of 2020-07-06, Coderwall returns false positives when checking for a username that contains a period.
I have tried to find out what Coderwall's criteria are for a valid username, but unfortunately I have not been able to
find them, and because of this the best thing we can do for now is to remove it.
```json
"Coderwall": {
@ -650,15 +666,15 @@ As of 2020-07-24, Zomato seems to be unstable. Majority of the time, Zomato take
## Mixer
As of 2020-07-22, the Mixer service has closed down.
```json
"mixer.com": {
"errorType": "status_code",
"rank": 1544,
"url": "https://mixer.com/{}",
"urlMain": "https://mixer.com/",
"urlProbe": "https://mixer.com/api/v1/channels/{}",
"username_claimed": "blue",
"username_unclaimed": "noonewouldeverusethis7"
},
"mixer.com": {
"errorType": "status_code",
"rank": 1544,
"url": "https://mixer.com/{}",
"urlMain": "https://mixer.com/",
"urlProbe": "https://mixer.com/api/v1/channels/{}",
"username_claimed": "blue",
"username_unclaimed": "noonewouldeverusethis7"
},
```
@ -1257,6 +1273,19 @@ As of 2022-05-1, FanCentro returns false positives. Will later in new version of
},
```
## Codeforces
As of 2022-05-01, Codeforces returns false positives
```json
"Codeforces": {
"errorType": "response_url",
"errorUrl": "https://codeforces.com/",
"url": "https://codeforces.com/profile/{}",
"urlMain": "https://www.codeforces.com/",
"username_claimed": "tourist",
"username_unclaimed": "noonewouldeverusethis789"
},
```
## Smashcast
As of 2022-05-01, Smashcast is down
```json
@ -1271,7 +1300,7 @@ As og 2022-05-01, Smashcast is down
## Countable
As of 2022-05-01, Countable returns false positives
```json
"Countable": {
"errorType": "status_code",
@ -1838,160 +1867,4 @@ __2024-04-24 :__ BCF seems to have gone defunct. Uncertain.
"urlMain": "https://bitcoinforum.com",
"username_claimed": "bitcoinforum.com"
}
```
## Zhihu
As of 24.06.2024, Zhihu returns false positives because the code it returns is obfuscated. Checking for patterns may allow us to detect whether a user exists; this will need to be worked on later.
```json
"Zhihu": {
"errorMsg": "用户不存在",
"errorType": "message",
"url": "https://www.zhihu.com/people/{}",
"urlMain": "https://www.zhihu.com/",
"username_claimed": "blue"
}
```
## Pentestit
As of 24.06.2024, Pentestit returns a 403. This is most likely due to a new site structure.
```json
"labpentestit": {
"errorType": "response_url",
"errorUrl": "https://lab.pentestit.ru/{}",
"url": "https://lab.pentestit.ru/profile/{}",
"urlMain": "https://lab.pentestit.ru/",
"username_claimed": "CSV"
}
```
## Euw
__2024-06-09 :__ errorMsg detection no longer works, because the error message is included in the HTTP response body even for a successful search
```json
"Euw": {
"errorMsg": "This summoner is not registered at OP.GG. Please check spelling.",
"errorType": "message",
"url": "https://euw.op.gg/summoner/userName={}",
"urlMain": "https://euw.op.gg/",
"username_claimed": "blue"
}
```
## Etsy
__2024-06-10 :__ HTTP requests return 403 Forbidden and trigger a connection verification check, so this target no longer works
```json
"Etsy": {
"errorType": "status_code",
"url": "https://www.etsy.com/shop/{}",
"urlMain": "https://www.etsy.com/",
"username_claimed": "JennyKrafts"
}
```
## Alik.cz
__2024-07-21 :__ Target is now BLACKLISTED from the default manifest due to the site receiving unnecessarily high traffic from Sherlock (by request of the site owners). This target is not permitted to be reactivated. Inclusion in unrelated manifests is not impacted, but it is discouraged.
## 8tracks
__2025-02-02 :__ Might be dead again. Nobody knows for sure.
```json
"8tracks": {
"errorType": "message",
"errorMsg": "\"available\":true",
"headers": {
"Accept-Language": "en-US,en;q=0.5"
},
"url": "https://8tracks.com/{}",
"urlProbe": "https://8tracks.com/users/check_username?login={}&format=jsonh",
"urlMain": "https://8tracks.com/",
"username_claimed": "blue"
}
```
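The entry above checks a JSON availability endpoint rather than the profile page itself. A minimal sketch of how that message-based probe is interpreted (URL and marker taken from the entry; whether the endpoint still responds this way is uncertain, since the site may be dead):

```python
# Sketch of the 8tracks-style message check: the marker appearing in the
# probe response means the username is free; its absence means claimed.
import requests

def eighttracks_username_available(username: str) -> bool:
    probe = f"https://8tracks.com/users/check_username?login={username}&format=jsonh"
    r = requests.get(probe, headers={"Accept-Language": "en-US,en;q=0.5"}, timeout=30)
    return '"available":true' in r.text
```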
## Shpock
__2025-02-02 :__ Can likely be added back with a new endpoint (source username availability endpoint from mobile app reg flow?)
```json
"Shpock": {
"errorType": "status_code",
"url": "https://www.shpock.com/shop/{}/items",
"urlMain": "https://www.shpock.com/",
"username_claimed": "user"
}
```
## Twitch
__2025-02-02 :__
```json
"Twitch": {
"errorType": "message",
"errorMsg": "components.availability-tracking.warn-unavailable.component",
"url": "https://www.twitch.tv/{}",
"urlMain": "https://www.twitch.tv/",
"urlProbe": "https://m.twitch.tv/{}",
"username_claimed": "jenny"
}
```
## Fiverr
__2025-02-02 :__ Fiverr added CSRF protections that messed with this test
```json
"Fiverr": {
"errorMsg": "\"status\":\"success\"",
"errorType": "message",
"headers": {
"Content-Type": "application/json",
"Accept-Language": "en-US,en;q=0.9"
},
"regexCheck": "^[A-Za-z][A-Za-z\\d_]{5,14}$",
"request_method": "POST",
"request_payload": {
"username": "{}"
},
"url": "https://www.fiverr.com/{}",
"urlMain": "https://www.fiverr.com/",
"urlProbe": "https://www.fiverr.com/validate_username",
"username_claimed": "blueman"
}
```
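Entries like the one above use `request_method`/`request_payload`, with `{}` placeholders substituted by the username before the POST is sent. A generic, hedged sketch of that mechanism (it will not work against Fiverr anymore, per the note above; the helper name is illustrative only):

```python
# Sketch of a request_payload-style probe. Not a working Fiverr check; it only
# illustrates how the manifest fields are combined into a POST request.
import json
import requests

def probe_with_payload(entry: dict, username: str) -> requests.Response:
    # Substitute the username into every "{}" placeholder in the payload.
    payload = json.loads(json.dumps(entry["request_payload"]).replace("{}", username))
    url = entry.get("urlProbe", entry["url"]).format(username)
    return requests.post(url, json=payload, headers=entry.get("headers", {}), timeout=30)
```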
## BabyRU
__2025-02-02 :__ Just being problematic (possibly related to errorMsg encoding?)
```json
"babyRU": {
"errorMsg": [
"\u0421\u0442\u0440\u0430\u043d\u0438\u0446\u0430, \u043a\u043e\u0442\u043e\u0440\u0443\u044e \u0432\u044b \u0438\u0441\u043a\u0430\u043b\u0438, \u043d\u0435 \u043d\u0430\u0439\u0434\u0435\u043d\u0430",
"Доступ с вашего IP-адреса временно ограничен"
],
"errorType": "message",
"url": "https://www.baby.ru/u/{}/",
"urlMain": "https://www.baby.ru/",
"username_claimed": "blue"
}
```
## v0.dev
__2025-02-16 :__ Unsure if any way to view profiles exists now
```json
"v0.dev": {
"errorType": "message",
"errorMsg": "<title>v0 by Vercel</title>",
"url": "https://v0.dev/{}",
"urlMain": "https://v0.dev",
"username_claimed": "t3dotgg"
}
```
## TorrentGalaxy
__2025-07-06 :__ Site appears to have gone offline in March and hasn't come back
```json
"TorrentGalaxy": {
"errorMsg": "<title>TGx:Can't show details</title>",
"errorType": "message",
"regexCheck": "^[A-Za-z0-9]{3,15}$",
"url": "https://torrentgalaxy.to/profile/{}",
"urlMain": "https://torrentgalaxy.to/",
"username_claimed": "GalaxyRG"
},
```

10
requirements.txt Normal file
View File

@ -0,0 +1,10 @@
certifi>=2019.6.16
colorama>=0.4.1
PySocks>=1.7.0
requests>=2.22.0
requests-futures>=1.0.0
stem>=1.8.0
torrequest>=0.1.0
pandas>=1.0.0
openpyxl<=3.0.10
exrex>=0.11.0

6
sherlock/__init__.py Normal file
View File

@ -0,0 +1,6 @@
""" Sherlock Module
This module contains the main logic to search for usernames at social
networks.
"""

View File

@ -14,9 +14,9 @@ if __name__ == "__main__":
# Check if the user is using the correct version of Python
python_version = sys.version.split()[0]
if sys.version_info < (3, 9):
print(f"Sherlock requires Python 3.9+\nYou are using Python {python_version}, which is not supported by Sherlock.")
if sys.version_info < (3, 6):
print(f"Sherlock requires Python 3.6+\nYou are using Python {python_version}, which is not supported by Sherlock.")
sys.exit(1)
from sherlock_project import sherlock
import sherlock
sherlock.main()

View File

@ -3,7 +3,7 @@
This module defines the objects for notifying the caller about the
results of queries.
"""
from sherlock_project.result import QueryStatus
from result import QueryStatus
from colorama import Fore, Style
import webbrowser

View File

@ -0,0 +1,80 @@
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"title": "Sherlock Targets",
"description": "Social media target to probe for existence of usernames",
"type": "object",
"properties": {
"$schema": { "type": "string" }
},
"patternProperties": {
"^(?!\\$).*?$": {
"type": "object",
"description": "User-friendly target name",
"required": [ "url", "urlMain", "errorType", "username_claimed" ],
"properties": {
"url": { "type": "string" },
"urlMain": { "type": "string" },
"urlProbe": { "type": "string" },
"username_claimed": { "type": "string" },
"regexCheck": { "type": "string" },
"isNSFW": { "type": "boolean" },
"headers": { "type": "object" },
"request_payload": { "type": "object" },
"__comment__": {
"type": "string",
"description": "Used to clarify important target information if (and only if) a commit message would not suffice.\nThis key should not be parsed anywhere within Sherlock."
},
"tags": {
"oneOf": [
{ "$ref": "#/$defs/tag" },
{ "type": "array", "items": { "$ref": "#/$defs/tag" } }
]
},
"request_method": {
"type": "string",
"enum": [ "GET", "POST", "HEAD", "PUT" ]
},
"errorType": {
"type": "string",
"enum": [ "message", "response_url", "status_code" ]
},
"errorMsg": {
"oneOf": [
{ "type": "string" },
{ "type": "array", "items": { "type": "string" } }
]
},
"errorCode": {
"oneOf": [
{ "type": "integer" },
{ "type": "array", "items": { "type": "integer" } }
]
},
"errorUrl": { "type": "string" },
"response_url": { "type": "string" }
},
"dependencies": {
"errorMsg": {
"properties" : { "errorType": { "const": "message" } }
},
"errorUrl": {
"properties": { "errorType": { "const": "response_url" } }
},
"errorCode": {
"properties": { "errorType": { "const": "status_code" } }
}
},
"if": { "properties": { "errorType": { "const": "message" } } },
"then": { "required": [ "errorMsg" ] },
"else": {
"if": { "properties": { "errorType": { "const": "response_url" } } },
"then": { "required": [ "errorUrl" ] }
},
"additionalProperties": false
}
},
"additionalProperties": false,
"$defs": {
"tag": { "type": "string", "enum": [ "adult", "gaming" ] }
}
}
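Since `jsonschema` is already a dev dependency, the schema above can be used to check manifest entries before they are submitted. A minimal sketch (the schema path and the sample entry are hypothetical):

```python
# Sketch: validate a candidate target entry against the manifest schema.
import json
from jsonschema import validate, ValidationError

with open("sherlock_project/resources/data.schema.json") as f:  # assumed path
    schema = json.load(f)

candidate = {
    "ExampleSite": {  # hypothetical target
        "errorType": "status_code",
        "url": "https://example.com/{}",
        "urlMain": "https://example.com/",
        "username_claimed": "blue",
    }
}

try:
    validate(instance=candidate, schema=schema)
    print("Entry is valid.")
except ValidationError as err:
    print(f"Entry failed validation: {err.message}")
```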

View File

@ -7,43 +7,30 @@ This module contains the main logic to search for usernames at social
networks.
"""
import sys
try:
from sherlock_project.__init__ import import_error_test_var # noqa: F401
except ImportError:
print("Did you run Sherlock with `python3 sherlock/sherlock.py ...`?")
print("This is an outdated method. Please see https://sherlockproject.xyz/installation for up to date instructions.")
sys.exit(1)
import csv
import signal
import pandas as pd
import os
import platform
import re
import sys
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from json import loads as json_loads
from time import monotonic
from typing import Optional
import requests
from requests_futures.sessions import FuturesSession
from sherlock_project.__init__ import (
__longname__,
__shortname__,
__version__,
forge_api_latest_release,
)
from sherlock_project.result import QueryStatus
from sherlock_project.result import QueryResult
from sherlock_project.notify import QueryNotify
from sherlock_project.notify import QueryNotifyPrint
from sherlock_project.sites import SitesInformation
from torrequest import TorRequest
from result import QueryStatus
from result import QueryResult
from notify import QueryNotifyPrint
from sites import SitesInformation
from colorama import init
from argparse import ArgumentTypeError
module_name = "Sherlock: Find Usernames Across Social Networks"
__version__ = "0.14.4"
class SherlockFuturesSession(FuturesSession):
def request(self, method, url, hooks=None, *args, **kwargs):
@ -156,6 +143,7 @@ def check_for_parameter(username):
return "{?}" in username
checksymbols = []
checksymbols = ["_", "-", "."]
@ -168,13 +156,14 @@ def multiple_usernames(username):
def sherlock(
username: str,
site_data: dict[str, dict[str, str]],
query_notify: QueryNotify,
dump_response: bool = False,
proxy: Optional[str] = None,
timeout: int = 60,
) -> dict[str, dict[str, str | QueryResult]]:
username,
site_data,
query_notify,
tor=False,
unique_tor=False,
proxy=None,
timeout=60,
):
"""Run Sherlock Analysis.
Checks for existence of username on various social media sites.
@ -186,6 +175,8 @@ def sherlock(
query_notify -- Object with base type of QueryNotify().
This will be used to notify the caller about
query results.
tor -- Boolean indicating whether to use a tor circuit for the requests.
unique_tor -- Boolean indicating whether to use a new tor circuit for each request.
proxy -- String indicating the proxy URL
timeout -- Time in seconds to wait before timing out request.
Default is 60 seconds.
@ -206,9 +197,15 @@ def sherlock(
# Notify caller that we are starting the query.
query_notify.start(username)
# Normal requests
underlying_session = requests.session()
# Create session based on request methodology
if tor or unique_tor:
# Requests using Tor obfuscation
underlying_request = TorRequest()
underlying_session = underlying_request.session
else:
# Normal requests
underlying_session = requests.session()
underlying_request = requests.Request()
# Limit number of workers to 20.
# This is probably vastly overkill.
@ -235,7 +232,7 @@ def sherlock(
# A user agent is needed because some sites don't return the correct
# information since they think that we are bots (Which we actually are...)
headers = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:129.0) Gecko/20100101 Firefox/129.0",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0",
}
if "headers" in net_info:
@ -332,10 +329,15 @@ def sherlock(
# Store future in data for access later
net_info["request_future"] = future
# Reset identify for tor (if needed)
if unique_tor:
underlying_request.reset_identity()
# Add this site's results into final dictionary with all the other results.
results_total[social_network] = results_site
# Open the file containing account links
# Core logic: If tor requests, make them here. If multi-threaded requests, wait for responses
for social_network, net_info in site_data.items():
# Retrieve results again
results_site = results_total.get(social_network)
@ -349,8 +351,6 @@ def sherlock(
# Get the expected error type
error_type = net_info["errorType"]
if isinstance(error_type, str):
error_type: list[str] = [error_type]
# Retrieve future and ensure it has finished
future = net_info["request_future"]
@ -377,16 +377,12 @@ def sherlock(
query_status = QueryStatus.UNKNOWN
error_context = None
# As WAFs advance and evolve, they will occasionally block Sherlock and
# lead to false positives and negatives. Fingerprints should be added
# here to filter results that fail to bypass WAFs. Fingerprints should
# be highly targetted. Comment at the end of each fingerprint to
# indicate target and date fingerprinted.
# As WAFs advance and evolve, they will occasionally block Sherlock and lead to false positives
# and negatives. Fingerprints should be added here to filter results that fail to bypass WAFs.
# Fingerprints should be highly targetted. Comment at the end of each fingerprint to indicate target and date.
WAFHitMsgs = [
r'.loading-spinner{visibility:hidden}body.no-js .challenge-running{display:none}body.dark{background-color:#222;color:#d9d9d9}body.dark a{color:#fff}body.dark a:hover{color:#ee730a;text-decoration:underline}body.dark .lds-ring div{border-color:#999 transparent transparent}body.dark .font-red{color:#b20f03}body.dark', # 2024-05-13 Cloudflare
r'<span id="challenge-error-text">', # 2024-11-11 Cloudflare error page
r'AwsWafIntegration.forceRefreshToken', # 2024-11-11 Cloudfront (AWS)
r'{return l.onPageView}}),Object.defineProperty(r,"perimeterxIdentifiers",{enumerable:' # 2024-04-09 PerimeterX / Human Security
'.loading-spinner{visibility:hidden}body.no-js .challenge-running{display:none}body.dark{background-color:#222;color:#d9d9d9}body.dark a{color:#fff}body.dark a:hover{color:#ee730a;text-decoration:underline}body.dark .lds-ring div{border-color:#999 transparent transparent}body.dark .font-red{color:#b20f03}body.dark .big-button,body.dark .pow-button{background-color:#4693ff;color:#1d1d1d}body.dark #challenge-success-text{background-image:url(data:image/svg+xml;base64,', # 2024-04-08 Cloudflare
'{return l.onPageView}}),Object.defineProperty(r,"perimeterxIdentifiers",{enumerable:' # 2024-04-09 PerimeterX / Human Security
]
if error_text is not None:
@ -395,91 +391,61 @@ def sherlock(
elif any(hitMsg in r.text for hitMsg in WAFHitMsgs):
query_status = QueryStatus.WAF
else:
if any(errtype not in ["message", "status_code", "response_url"] for errtype in error_type):
error_context = f"Unknown error type '{error_type}' for {social_network}"
query_status = QueryStatus.UNKNOWN
elif error_type == "message":
# error_flag True denotes no error found in the HTML
# error_flag False denotes error found in the HTML
error_flag = True
errors = net_info.get("errorMsg")
# errors will hold the error message
# it can be string or list
# by isinstance method we can detect that
# and handle the case for strings as normal procedure
# and if its list we can iterate the errors
if isinstance(errors, str):
# Checks if the error message is in the HTML
# if error is present we will set flag to False
if errors in r.text:
error_flag = False
else:
if "message" in error_type:
# error_flag True denotes no error found in the HTML
# error_flag False denotes error found in the HTML
error_flag = True
errors = net_info.get("errorMsg")
# errors will hold the error message
# it can be string or list
# by isinstance method we can detect that
# and handle the case for strings as normal procedure
# and if its list we can iterate the errors
if isinstance(errors, str):
# Checks if the error message is in the HTML
# if error is present we will set flag to False
if errors in r.text:
error_flag = False
else:
# If it's list, it will iterate all the error message
for error in errors:
if error in r.text:
error_flag = False
break
if error_flag:
query_status = QueryStatus.CLAIMED
else:
query_status = QueryStatus.AVAILABLE
# If it's list, it will iterate all the error message
for error in errors:
if error in r.text:
error_flag = False
break
if error_flag:
query_status = QueryStatus.CLAIMED
else:
query_status = QueryStatus.AVAILABLE
elif error_type == "status_code":
error_codes = net_info.get("errorCode")
query_status = QueryStatus.CLAIMED
if "status_code" in error_type and query_status is not QueryStatus.AVAILABLE:
error_codes = net_info.get("errorCode")
query_status = QueryStatus.CLAIMED
# Type consistency, allowing for both singlets and lists in manifest
if isinstance(error_codes, int):
error_codes = [error_codes]
# Type consistency, allowing for both singlets and lists in manifest
if isinstance(error_codes, int):
error_codes = [error_codes]
if error_codes is not None and r.status_code in error_codes:
query_status = QueryStatus.AVAILABLE
elif r.status_code >= 300 or r.status_code < 200:
query_status = QueryStatus.AVAILABLE
if "response_url" in error_type and query_status is not QueryStatus.AVAILABLE:
# For this detection method, we have turned off the redirect.
# So, there is no need to check the response URL: it will always
# match the request. Instead, we will ensure that the response
# code indicates that the request was successful (i.e. no 404, or
# forward to some odd redirect).
if 200 <= r.status_code < 300:
query_status = QueryStatus.CLAIMED
else:
query_status = QueryStatus.AVAILABLE
if dump_response:
print("+++++++++++++++++++++")
print(f"TARGET NAME : {social_network}")
print(f"USERNAME : {username}")
print(f"TARGET URL : {url}")
print(f"TEST METHOD : {error_type}")
try:
print(f"STATUS CODES : {net_info['errorCode']}")
except KeyError:
pass
print("Results...")
try:
print(f"RESPONSE CODE : {r.status_code}")
except Exception:
pass
try:
print(f"ERROR TEXT : {net_info['errorMsg']}")
except KeyError:
pass
print(">>>>> BEGIN RESPONSE TEXT")
try:
print(r.text)
except Exception:
pass
print("<<<<< END RESPONSE TEXT")
print("VERDICT : " + str(query_status))
print("+++++++++++++++++++++")
if error_codes is not None and r.status_code in error_codes:
query_status = QueryStatus.AVAILABLE
elif r.status_code >= 300 or r.status_code < 200:
query_status = QueryStatus.AVAILABLE
elif error_type == "response_url":
# For this detection method, we have turned off the redirect.
# So, there is no need to check the response URL: it will always
# match the request. Instead, we will ensure that the response
# code indicates that the request was successful (i.e. no 404, or
# forward to some odd redirect).
if 200 <= r.status_code < 300:
query_status = QueryStatus.CLAIMED
else:
query_status = QueryStatus.AVAILABLE
else:
# It should be impossible to ever get here...
raise ValueError(
f"Unknown Error Type '{error_type}' for " f"site '{social_network}'"
)
# Notify caller about results of query.
result: QueryResult = QueryResult(
result = QueryResult(
username=username,
site_name=social_network,
site_url_user=url,
@ -536,14 +502,20 @@ def handler(signal_received, frame):
def main():
version_string = (
f"%(prog)s {__version__}\n"
+ f"{requests.__description__}: {requests.__version__}\n"
+ f"Python: {platform.python_version()}"
)
parser = ArgumentParser(
formatter_class=RawDescriptionHelpFormatter,
description=f"{__longname__} (Version {__version__})",
description=f"{module_name} (Version {__version__})",
)
parser.add_argument(
"--version",
action="version",
version=f"{__shortname__} v{__version__}",
version=version_string,
help="Display version information and dependencies.",
)
parser.add_argument(
@ -568,6 +540,22 @@ def main():
dest="output",
help="If using single username, the output of the result will be saved to this file.",
)
parser.add_argument(
"--tor",
"-t",
action="store_true",
dest="tor",
default=False,
help="Make requests over Tor; increases runtime; requires Tor to be installed and in system path.",
)
parser.add_argument(
"--unique-tor",
"-u",
action="store_true",
dest="unique_tor",
default=False,
help="Make requests over Tor with new Tor circuit after each request; increases runtime; requires Tor to be installed and in system path.",
)
parser.add_argument(
"--csv",
action="store_true",
@ -599,20 +587,13 @@ def main():
default=None,
help="Make requests over a proxy. e.g. socks5://127.0.0.1:1080",
)
parser.add_argument(
"--dump-response",
action="store_true",
dest="dump_response",
default=False,
help="Dump the HTTP response to stdout for targeted debugging.",
)
parser.add_argument(
"--json",
"-j",
metavar="JSON_FILE",
dest="json_file",
default=None,
help="Load data from a JSON file or an online, valid, JSON file. Upstream PR numbers also accepted.",
help="Load data from a JSON file or an online, valid, JSON file.",
)
parser.add_argument(
"--timeout",
@ -675,32 +656,6 @@ def main():
help="Include checking of NSFW sites from default list.",
)
# TODO deprecated in favor of --txt, retained for workflow compatibility, to be removed
# in future release
parser.add_argument(
"--no-txt",
action="store_true",
dest="no_txt",
default=False,
help="Disable creation of a txt file - WILL BE DEPRECATED",
)
parser.add_argument(
"--txt",
action="store_true",
dest="output_txt",
default=False,
help="Enable creation of a txt file",
)
parser.add_argument(
"--ignore-exclusions",
action="store_true",
dest="ignore_exclusions",
default=False,
help="Ignore upstream exclusions (may return more false positives)",
)
args = parser.parse_args()
# If the user presses CTRL-C, exit gracefully without throwing errors
@ -708,23 +663,38 @@ def main():
# Check for newer version of Sherlock. If it exists, let the user know about it
try:
latest_release_raw = requests.get(forge_api_latest_release, timeout=10).text
latest_release_json = json_loads(latest_release_raw)
latest_remote_tag = latest_release_json["tag_name"]
r = requests.get(
"https://raw.githubusercontent.com/sherlock-project/sherlock/master/sherlock/sherlock.py"
)
if latest_remote_tag[1:] != __version__:
remote_version = str(re.findall('__version__ = "(.*)"', r.text)[0])
local_version = __version__
if remote_version != local_version:
print(
f"Update available! {__version__} --> {latest_remote_tag[1:]}"
f"\n{latest_release_json['html_url']}"
"Update Available!\n"
+ f"You are running version {local_version}. Version {remote_version} is available at https://github.com/sherlock-project/sherlock"
)
except Exception as error:
print(f"A problem occurred while checking for an update: {error}")
# Argument check
# TODO regex check on args.proxy
if args.tor and (args.proxy is not None):
raise Exception("Tor and Proxy cannot be set at the same time.")
# Make prompts
if args.proxy is not None:
print("Using the proxy: " + args.proxy)
if args.tor or args.unique_tor:
print("Using Tor to make requests")
print(
"Warning: some websites might refuse connecting over Tor, so note that using this option might increase connection errors."
)
if args.no_color:
# Disable color output.
init(strip=True, convert=False)
@ -746,32 +716,10 @@ def main():
try:
if args.local:
sites = SitesInformation(
os.path.join(os.path.dirname(__file__), "resources/data.json"),
honor_exclusions=False,
os.path.join(os.path.dirname(__file__), "resources/data.json")
)
else:
json_file_location = args.json_file
if args.json_file:
# If --json parameter is a number, interpret it as a pull request number
if args.json_file.isnumeric():
pull_number = args.json_file
pull_url = f"https://api.github.com/repos/sherlock-project/sherlock/pulls/{pull_number}"
pull_request_raw = requests.get(pull_url, timeout=10).text
pull_request_json = json_loads(pull_request_raw)
# Check if it's a valid pull request
if "message" in pull_request_json:
print(f"ERROR: Pull request #{pull_number} not found.")
sys.exit(1)
head_commit_sha = pull_request_json["head"]["sha"]
json_file_location = f"https://raw.githubusercontent.com/sherlock-project/sherlock/{head_commit_sha}/sherlock_project/resources/data.json"
sites = SitesInformation(
data_file_path=json_file_location,
honor_exclusions=not args.ignore_exclusions,
do_not_exclude=args.site_list,
)
sites = SitesInformation(args.json_file)
except Exception as error:
print(f"ERROR: {error}")
sys.exit(1)
@ -825,7 +773,8 @@ def main():
username,
site_data,
query_notify,
dump_response=args.dump_response,
tor=args.tor,
unique_tor=args.unique_tor,
proxy=args.proxy,
timeout=args.timeout,
)
@ -840,15 +789,14 @@ def main():
else:
result_file = f"{username}.txt"
if args.output_txt:
with open(result_file, "w", encoding="utf-8") as file:
exists_counter = 0
for website_name in results:
dictionary = results[website_name]
if dictionary.get("status").status == QueryStatus.CLAIMED:
exists_counter += 1
file.write(dictionary["url_user"] + "\n")
file.write(f"Total Websites Username Detected On : {exists_counter}\n")
with open(result_file, "w", encoding="utf-8") as file:
exists_counter = 0
for website_name in results:
dictionary = results[website_name]
if dictionary.get("status").status == QueryStatus.CLAIMED:
exists_counter += 1
file.write(dictionary["url_user"] + "\n")
file.write(f"Total Websites Username Detected On : {exists_counter}\n")
if args.csv:
result_file = f"{username}.csv"
@ -925,8 +873,8 @@ def main():
{
"username": usernames,
"name": names,
"url_main": [f'=HYPERLINK(\"{u}\")' for u in url_main],
"url_user": [f'=HYPERLINK(\"{u}\")' for u in url_user],
"url_main": url_main,
"url_user": url_user,
"exists": exists,
"http_status": http_status,
"response_time_s": response_time_s,

View File

@ -7,10 +7,6 @@ import json
import requests
import secrets
MANIFEST_URL = "https://raw.githubusercontent.com/sherlock-project/sherlock/master/sherlock_project/resources/data.json"
EXCLUSIONS_URL = "https://raw.githubusercontent.com/sherlock-project/sherlock/refs/heads/exclusions/false_positive_exclusions.txt"
class SiteInformation:
def __init__(self, name, url_home, url_username_format, username_claimed,
information, is_nsfw, username_unclaimed=secrets.token_urlsafe(10)):
@ -71,17 +67,12 @@ class SiteInformation:
Return Value:
Nicely formatted string to get information about this object.
"""
return f"{self.name} ({self.url_home})"
class SitesInformation:
def __init__(
self,
data_file_path: str|None = None,
honor_exclusions: bool = True,
do_not_exclude: list[str] = [],
):
def __init__(self, data_file_path=None):
"""Create Sites Information Object.
Contains information about all supported websites.
@ -119,7 +110,7 @@ class SitesInformation:
# The default data file is the live data.json which is in the GitHub repo. The reason why we are using
# this instead of the local one is so that the user has the most up-to-date data. This prevents
# users from creating issue about false positives which has already been fixed or having outdated data
data_file_path = MANIFEST_URL
data_file_path = "https://raw.githubusercontent.com/sherlock-project/sherlock/master/sherlock/resources/data.json"
# Ensure that specified data file has correct extension.
if not data_file_path.lower().endswith(".json"):
@ -129,7 +120,7 @@ class SitesInformation:
if data_file_path.lower().startswith("http"):
# Reference is to a URL.
try:
response = requests.get(url=data_file_path, timeout=30)
response = requests.get(url=data_file_path)
except Exception as error:
raise FileNotFoundError(
f"Problem while attempting to access data file URL '{data_file_path}': {error}"
@ -161,31 +152,9 @@ class SitesInformation:
raise FileNotFoundError(f"Problem while attempting to access "
f"data file '{data_file_path}'."
)
site_data.pop('$schema', None)
if honor_exclusions:
try:
response = requests.get(url=EXCLUSIONS_URL, timeout=10)
if response.status_code == 200:
exclusions = response.text.splitlines()
exclusions = [exclusion.strip() for exclusion in exclusions]
for site in do_not_exclude:
if site in exclusions:
exclusions.remove(site)
for exclusion in exclusions:
try:
site_data.pop(exclusion, None)
except KeyError:
pass
except Exception:
# If there was any problem loading the exclusions, just continue without them
print("Warning: Could not load exclusions, continuing without them.")
honor_exclusions = False
self.sites = {}
# Add all site information from the json file to internal site list.
@ -205,7 +174,7 @@ class SitesInformation:
raise ValueError(
f"Problem parsing json contents at '{data_file_path}': Missing attribute {error}."
)
except TypeError:
except TypeError as error:
print(f"Encountered TypeError parsing json contents for target '{site_name}' at {data_file_path}\nSkipping target.\n")
return
@ -225,7 +194,7 @@ class SitesInformation:
for site in self.sites:
if self.sites[site].is_nsfw and site.casefold() not in do_not_remove:
continue
sites[site] = self.sites[site]
sites[site] = self.sites[site]
self.sites = sites
def site_name_list(self):
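A short usage sketch based on the master-side constructor and `site_name_list()` shown above; with no arguments the class fetches the live `data.json` from GitHub, so this needs network access:

```python
# Sketch: load the manifest and report how many targets it defines.
from sherlock_project.sites import SitesInformation

sites = SitesInformation()  # defaults to the remote data.json
print(f"Loaded {len(sites.site_name_list())} targets")
```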

View File

@ -0,0 +1,4 @@
"""Sherlock Tests
This package contains various submodules used to run tests.
"""

213
sherlock/tests/all.py Normal file
View File

@ -0,0 +1,213 @@
"""Sherlock Tests
This module contains various tests.
"""
from tests.base import SherlockBaseTest
import exrex
class SherlockDetectTests(SherlockBaseTest):
def test_detect_true_via_message(self):
"""Test Username Does Exist (Via Message).
This test ensures that the "message" detection mechanism of
ensuring that a Username does exist works properly.
Keyword Arguments:
self -- This object.
Return Value:
Nothing.
Will trigger an assert if detection mechanism did not work as expected.
"""
site = "AllMyLinks"
site_data = self.site_data_all[site]
# Ensure that the site's detection method has not changed.
self.assertEqual("message", site_data["errorType"])
self.username_check([site_data["username_claimed"]], [site], exist_check=True)
return
def test_detect_false_via_message(self):
"""Test Username Does Not Exist (Via Message).
This test ensures that the "message" detection mechanism of
ensuring that a Username does *not* exist works properly.
Keyword Arguments:
self -- This object.
Return Value:
Nothing.
Will trigger an assert if detection mechanism did not work as expected.
"""
site = "AllMyLinks"
site_data = self.site_data_all[site]
# Ensure that the site's detection method has not changed.
self.assertEqual("message", site_data["errorType"])
# Generate a valid username based on the regex for a username that the
# site supports that is *most likely* not taken. The regex is a slightly
# modified version of site_data["regexCheck"] as we want a username
# that has the maximum length that is supported by the site. This way,
# we won't generate a random username that might actually exist. This
# method is very hacky, but it does the job, as having hardcoded
# usernames that don't exist will lead to people with ill intent to
# create an account with that username which will break the tests
valid_username = exrex.getone(r"^[a-z0-9][a-z0-9-]{32}$")
self.username_check([valid_username], [site], exist_check=False)
return
def test_detect_true_via_status_code(self):
"""Test Username Does Exist (Via Status Code).
This test ensures that the "status code" detection mechanism of
ensuring that a Username does exist works properly.
Keyword Arguments:
self -- This object.
Return Value:
Nothing.
Will trigger an assert if detection mechanism did not work as expected.
"""
site = "BitBucket"
site_data = self.site_data_all[site]
# Ensure that the site's detection method has not changed.
self.assertEqual("status_code", site_data["errorType"])
self.username_check([site_data["username_claimed"]], [site], exist_check=True)
return
def test_detect_false_via_status_code(self):
"""Test Username Does Not Exist (Via Status Code).
This test ensures that the "status code" detection mechanism correctly
reports that a Username does *not* exist.
Keyword Arguments:
self -- This object.
Return Value:
Nothing.
Will trigger an assert if detection mechanism did not work as expected.
"""
site = "BitBucket"
site_data = self.site_data_all[site]
# Ensure that the site's detection method has not changed.
self.assertEqual("status_code", site_data["errorType"])
# Generate a valid username, based on the site's username regex, that is
# *most likely* not taken. The regex is a slightly modified version of
# site_data["regexCheck"]: we want a username at the maximum length the
# site supports, so we won't accidentally generate a random username that
# actually exists. This approach is hacky, but it does the job: hardcoding
# usernames that don't exist would let people with ill intent register
# those accounts and break the tests.
valid_username = exrex.getone(r"^[a-zA-Z0-9-_]{30}")
self.username_check([valid_username], [site], exist_check=False)
return
class SherlockSiteCoverageTests(SherlockBaseTest):
def test_coverage_false_via_status(self):
"""Test Username Does Not Exist Site Coverage (Via HTTP Status).
This test checks all sites with the "HTTP Status" detection mechanism
to ensure that a Username that does not exist is reported that way.
Keyword Arguments:
self -- This object.
Return Value:
Nothing.
Will trigger an assert if detection mechanism did not work as expected.
"""
self.detect_type_check("status_code", exist_check=False)
return
def test_coverage_true_via_status(self):
"""Test Username Does Exist Site Coverage (Via HTTP Status).
This test checks all sites with the "HTTP Status" detection mechanism
to ensure that a Username that does exist is reported that way.
Keyword Arguments:
self -- This object.
Return Value:
Nothing.
Will trigger an assert if detection mechanism did not work as expected.
"""
self.detect_type_check("status_code", exist_check=True)
return
def test_coverage_false_via_message(self):
"""Test Username Does Not Exist Site Coverage (Via Error Message).
This test checks all sites with the "Error Message" detection mechanism
to ensure that a Username that does not exist is reported that way.
Keyword Arguments:
self -- This object.
Return Value:
Nothing.
Will trigger an assert if detection mechanism did not work as expected.
"""
self.detect_type_check("message", exist_check=False)
return
def test_coverage_true_via_message(self):
"""Test Username Does Exist Site Coverage (Via Error Message).
This test checks all sites with the "Error Message" detection mechanism
to ensure that a Username that does exist is reported that way.
Keyword Arguments:
self -- This object.
Return Value:
Nothing.
Will trigger an assert if detection mechanism did not work as expected.
"""
self.detect_type_check("message", exist_check=True)
return
def test_coverage_total(self):
"""Test Site Coverage Is Total.
This test checks that all sites have test data available.
Keyword Arguments:
self -- This object.
Return Value:
Nothing.
Will trigger an assert if we do not have total coverage.
"""
self.coverage_total_check()
return
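For reference, a minimal sketch of how these detection and coverage tests could be run with the standard `unittest` runner. It assumes the command is executed from the `sherlock/` directory so that the `tests`, `result`, `notify`, and `sites` imports in `tests/base.py` resolve as written; the project itself may provide its own test entry point.

```python
# Hypothetical runner sketch, not part of the repository.
import unittest

if __name__ == "__main__":
    # Discover and run tests/all.py relative to the sherlock/ directory.
    suite = unittest.defaultTestLoader.discover("tests", pattern="all.py")
    unittest.TextTestRunner(verbosity=2).run(suite)
```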

224
sherlock/tests/base.py Normal file
View File

@ -0,0 +1,224 @@
"""Sherlock Base Tests
This module contains various utilities for running tests.
"""
import os
import os.path
import unittest
import sherlock
from result import QueryStatus
from notify import QueryNotify
from sites import SitesInformation
import warnings
class SherlockBaseTest(unittest.TestCase):
def setUp(self):
"""Sherlock Base Test Setup.
Does common setup tasks for base Sherlock tests.
Keyword Arguments:
self -- This object.
Return Value:
Nothing.
"""
# This ignores the ResourceWarning from an unclosed SSLSocket.
# TODO: Figure out how to fix the code so this is not needed.
warnings.simplefilter("ignore", ResourceWarning)
# Create object with all information about sites we are aware of.
sites = SitesInformation(data_file_path=os.path.join(os.path.dirname(__file__), "../resources/data.json"))
# Create original dictionary from SitesInformation() object.
# Eventually, the rest of the code will be updated to use the new object
# directly, but this will glue the two pieces together.
site_data_all = {}
for site in sites:
site_data_all[site.name] = site.information
self.site_data_all = site_data_all
# Load excluded sites list, if any
excluded_sites_path = os.path.join(os.path.dirname(os.path.realpath(sherlock.__file__)), "tests/.excluded_sites")
try:
with open(excluded_sites_path, "r", encoding="utf-8") as excluded_sites_file:
self.excluded_sites = excluded_sites_file.read().splitlines()
except FileNotFoundError:
self.excluded_sites = []
# Create notify object for query results.
self.query_notify = QueryNotify()
self.tor = False
self.unique_tor = False
self.timeout = None
self.skip_error_sites = True
return
def site_data_filter(self, site_list):
"""Filter Site Data.
Keyword Arguments:
self -- This object.
site_list -- List of strings corresponding to sites which
should be filtered.
Return Value:
Dictionary containing sub-set of site data specified by "site_list".
"""
# Create new dictionary that has filtered site data based on input.
# Note that any site specified which is not understood will generate
# an error.
site_data = {}
for site in site_list:
with self.subTest(f"Checking test vector Site '{site}' "
f"exists in total site data."
):
site_data[site] = self.site_data_all[site]
return site_data
def username_check(self, username_list, site_list, exist_check=True):
"""Username Exist Check.
Keyword Arguments:
self -- This object.
username_list -- List of strings corresponding to usernames
which should exist on *all* of the sites.
site_list -- List of strings corresponding to sites which
should be filtered.
exist_check -- Boolean which indicates if this should be
a check for Username existence,
or non-existence.
Return Value:
Nothing.
Will trigger an assert if Username does not have the expected
existence state.
"""
# Filter all site data down to just what is needed for this test.
site_data = self.site_data_filter(site_list)
if exist_check:
check_type_text = "claimed"
exist_result_desired = QueryStatus.CLAIMED
else:
check_type_text = "available"
exist_result_desired = QueryStatus.AVAILABLE
for username in username_list:
results = sherlock.sherlock(username,
site_data,
self.query_notify,
tor=self.tor,
unique_tor=self.unique_tor,
timeout=self.timeout
)
for site, result in results.items():
with self.subTest(f"Checking Username '{username}' "
f"{check_type_text} on Site '{site}'"
):
if (
self.skip_error_sites and
(result["status"].status == QueryStatus.UNKNOWN)
):
# Some error connecting to site.
self.skipTest(f"Skipping Username '{username}' "
f"{check_type_text} on Site '{site}': "
f"Site returned error status."
)
self.assertEqual(exist_result_desired,
result["status"].status)
return
def detect_type_check(self, detect_type, exist_check=True):
"""Detection Type Check.
Keyword Arguments:
self -- This object.
detect_type -- String corresponding to detection algorithm
which is desired to be tested.
Note that only sites which have documented
usernames which exist and do not exist
will be tested.
exist_check -- Boolean which indicates if this should be
a check for Username existence,
or non-existence.
Return Value:
Nothing.
Runs tests on all sites that use the indicated detection algorithm
and that also have test vectors specified.
Will trigger an assert if Username does not have the expected
existence state.
"""
# Dictionary of sites that should be tested for having a username.
# This will allow us to test sites with a common username in parallel.
sites_by_username = {}
for site, site_data in self.site_data_all.items():
if (
(site in self.excluded_sites) or
(site_data["errorType"] != detect_type) or
(site_data.get("username_claimed") is None) or
(site_data.get("username_unclaimed") is None)
):
# This is either not a site we are interested in, or the
# site does not contain the required information to do
# the tests.
pass
else:
# We should run a test on this site.
# Figure out which type of username to use.
if exist_check:
username = site_data.get("username_claimed")
else:
username = site_data.get("username_unclaimed")
# Add this site to the list of sites corresponding to this
# username.
if username in sites_by_username:
sites_by_username[username].append(site)
else:
sites_by_username[username] = [site]
# Check on the username availability against all of the sites.
for username, site_list in sites_by_username.items():
self.username_check([username],
site_list,
exist_check=exist_check
)
return
def coverage_total_check(self):
"""Total Coverage Check.
Keyword Arguments:
self -- This object.
Return Value:
Nothing.
Counts up all Sites with full test data available.
Will trigger an assert if any Site does not have test coverage.
"""
site_no_tests_list = []
for site, site_data in self.site_data_all.items():
if site_data.get("username_claimed") is None:
# Test information not available on this site.
site_no_tests_list.append(site)
self.assertEqual("", ", ".join(site_no_tests_list))
return
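As an illustration of the helper API defined above, a minimal hypothetical subclass of `SherlockBaseTest`. The site key must exist in `data.json` ("GitHub" does, per `sites.md`); the username value is only an assumed example of a claimed account.

```python
# Hypothetical usage sketch of SherlockBaseTest.username_check().
from tests.base import SherlockBaseTest


class ExampleDetectTest(SherlockBaseTest):
    def test_example_claimed(self):
        # "torvalds" is assumed to be a claimed GitHub username and is used
        # purely for illustration.
        self.username_check(["torvalds"], ["GitHub"], exist_check=True)
```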

View File

@ -0,0 +1,29 @@
import unittest
import sys
sys.path.append('../')
import sherlock as sh
checksymbols = ["_", "-", "."]
"""Test for multiple usernames.
This test ensures that the function multiple_usernames works properly. More specifically,
different scenarios are tested, and only usernames that contain the sequence {?}
should return positive.
Keyword Arguments:
self -- This object.
Return Value:
Nothing.
"""
class TestMultipleUsernames(unittest.TestCase):
    def test_area(self):
        test_usernames = ["test{?}test", "test{?feo", "test"]
        for name in test_usernames:
            if sh.check_for_parameter(name):
                self.assertEqual(sh.multiple_usernames(name), ["test_test", "test-test", "test.test"])
            else:
                self.assertEqual(name, name)
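Read together with the assertions above, the expected behaviour of the two helpers can be summarised as follows; the return values shown are inferred from this test, not taken from the implementation.

```python
# Assumed behaviour, inferred from TestMultipleUsernames above. Requires the
# sherlock package directory on sys.path, as in the test module itself.
import sys
sys.path.append('../')
import sherlock as sh

print(sh.check_for_parameter("test{?}test"))  # expected: True
print(sh.multiple_usernames("test{?}test"))   # expected: ['test_test', 'test-test', 'test.test']
print(sh.check_for_parameter("plainname"))    # expected: False
```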

View File

@ -1,30 +0,0 @@
""" Sherlock Module
This module contains the main logic to search for usernames at social
networks.
"""
from importlib.metadata import version as pkg_version, PackageNotFoundError
import pathlib
import tomli
def get_version() -> str:
"""Fetch the version number of the installed package."""
try:
return pkg_version("sherlock_project")
except PackageNotFoundError:
pyproject_path: pathlib.Path = pathlib.Path(__file__).resolve().parent.parent / "pyproject.toml"
with pyproject_path.open("rb") as f:
pyproject_data = tomli.load(f)
return pyproject_data["tool"]["poetry"]["version"]
# This variable is only used to check for ImportErrors induced by users running as script rather than as module or package
import_error_test_var = None
__shortname__ = "Sherlock"
__longname__ = "Sherlock: Find Usernames Across Social Networks"
__version__ = get_version()
forge_api_latest_release = "https://api.github.com/repos/sherlock-project/sherlock/releases/latest"

View File

@ -1,149 +0,0 @@
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"title": "Sherlock Target Manifest",
"description": "Social media targets to probe for the existence of known usernames",
"type": "object",
"properties": {
"$schema": { "type": "string" }
},
"patternProperties": {
"^(?!\\$).*?$": {
"type": "object",
"description": "Target name and associated information (key should be human readable name)",
"required": ["url", "urlMain", "errorType", "username_claimed"],
"properties": {
"url": { "type": "string" },
"urlMain": { "type": "string" },
"urlProbe": { "type": "string" },
"username_claimed": { "type": "string" },
"regexCheck": { "type": "string" },
"isNSFW": { "type": "boolean" },
"headers": { "type": "object" },
"request_payload": { "type": "object" },
"__comment__": {
"type": "string",
"description": "Used to clarify important target information if (and only if) a commit message would not suffice.\nThis key should not be parsed anywhere within Sherlock."
},
"tags": {
"oneOf": [
{ "$ref": "#/$defs/tag" },
{ "type": "array", "items": { "$ref": "#/$defs/tag" } }
]
},
"request_method": {
"type": "string",
"enum": ["GET", "POST", "HEAD", "PUT"]
},
"errorType": {
"oneOf": [
{
"type": "string",
"enum": ["message", "response_url", "status_code"]
},
{
"type": "array",
"items": {
"type": "string",
"enum": ["message", "response_url", "status_code"]
}
}
]
},
"errorMsg": {
"oneOf": [
{ "type": "string" },
{ "type": "array", "items": { "type": "string" } }
]
},
"errorCode": {
"oneOf": [
{ "type": "integer" },
{ "type": "array", "items": { "type": "integer" } }
]
},
"errorUrl": { "type": "string" },
"response_url": { "type": "string" }
},
"dependencies": {
"errorMsg": {
"oneOf": [
{ "properties": { "errorType": { "const": "message" } } },
{
"properties": {
"errorType": {
"type": "array",
"contains": { "const": "message" }
}
}
}
]
},
"errorUrl": {
"oneOf": [
{ "properties": { "errorType": { "const": "response_url" } } },
{
"properties": {
"errorType": {
"type": "array",
"contains": { "const": "response_url" }
}
}
}
]
},
"errorCode": {
"oneOf": [
{ "properties": { "errorType": { "const": "status_code" } } },
{
"properties": {
"errorType": {
"type": "array",
"contains": { "const": "status_code" }
}
}
}
]
}
},
"allOf": [
{
"if": {
"anyOf": [
{ "properties": { "errorType": { "const": "message" } } },
{
"properties": {
"errorType": {
"type": "array",
"contains": { "const": "message" }
}
}
}
]
},
"then": { "required": ["errorMsg"] }
},
{
"if": {
"anyOf": [
{ "properties": { "errorType": { "const": "response_url" } } },
{
"properties": {
"errorType": {
"type": "array",
"contains": { "const": "response_url" }
}
}
}
]
},
"then": { "required": ["errorUrl"] }
}
],
"additionalProperties": false
}
},
"additionalProperties": false,
"$defs": {
"tag": { "type": "string", "enum": ["adult", "gaming"] }
}
}
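For reference, a minimal target entry that satisfies the schema above: all required keys are present, and `errorMsg` is supplied because `errorType` is `"message"`. The site values are purely hypothetical and are shown as a Python dict for illustration.

```python
# Hypothetical data.json entry; not part of the repository.
import json

minimal_target = {
    "ExampleSite": {
        "url": "https://example.com/{}",
        "urlMain": "https://example.com/",
        "errorType": "message",
        "errorMsg": "Page not found",
        "username_claimed": "exampleuser",
    }
}
print(json.dumps(minimal_target, indent=2))
```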

31
site_list.py Normal file
View File

@ -0,0 +1,31 @@
#!/usr/bin/env python
# This module generates the listing of supported sites which can be found in
# sites.md. It also organizes all the sites in alphanumeric order
import json
# Read the data.json file
with open("sherlock/resources/data.json", "r", encoding="utf-8") as data_file:
data: dict = json.load(data_file)
# Removes schema-specific keywords for proper processing
social_networks: dict = dict(data)
social_networks.pop('$schema', None)
# Sort the social networks in alphanumeric order
social_networks: list = sorted(social_networks.items())
# Write the list of supported sites to sites.md
with open("sites.md", "w") as site_file:
site_file.write(f"## List Of Supported Sites ({len(social_networks)} Sites In Total!)\n")
for social_network, info in social_networks:
url_main = info["urlMain"]
is_nsfw = "**(NSFW)**" if info.get("isNSFW") else ""
site_file.write(f"1. ![](https://www.google.com/s2/favicons?domain={url_main}) [{social_network}]({url_main}) {is_nsfw}\n")
# Overwrite the data.json file with sorted data
with open("sherlock/resources/data.json", "w") as data_file:
sorted_data = json.dumps(data, indent=2, sort_keys=True)
data_file.write(sorted_data)
data_file.write("\n")
print("Finished updating supported site listing!")
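A quick sanity check of the markdown line format the loop above emits, using a hypothetical entry that is not part of `data.json`:

```python
# Hypothetical entry, used only to show the generated sites.md line format.
url_main = "https://example.com/"
is_nsfw = ""
line = f"1. ![](https://www.google.com/s2/favicons?domain={url_main}) [ExampleSite]({url_main}) {is_nsfw}\n"
print(line, end="")
# -> 1. ![](https://www.google.com/s2/favicons?domain=https://example.com/) [ExampleSite](https://example.com/)
```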

409
sites.md Normal file
View File

@ -0,0 +1,409 @@
## List Of Supported Sites (408 Sites In Total!)
1. ![](https://www.google.com/s2/favicons?domain=https://www.1337x.to/) [1337x](https://www.1337x.to/)
1. ![](https://www.google.com/s2/favicons?domain=https://2Dimensions.com/) [2Dimensions](https://2Dimensions.com/)
1. ![](https://www.google.com/s2/favicons?domain=http://forum.3dnews.ru/) [3dnews](http://forum.3dnews.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.7cups.com/) [7Cups](https://www.7cups.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://8tracks.com/) [8tracks](https://8tracks.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.9gag.com/) [9GAG](https://www.9gag.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://apclips.com/) [APClips](https://apclips.com/) **(NSFW)**
1. ![](https://www.google.com/s2/favicons?domain=https://about.me/) [About.me](https://about.me/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.academia.edu/) [Academia.edu](https://www.academia.edu/)
1. ![](https://www.google.com/s2/favicons?domain=https://admireme.vip/) [AdmireMe.Vip](https://admireme.vip/) **(NSFW)**
1. ![](https://www.google.com/s2/favicons?domain=https://airlinepilot.life/) [Air Pilot Life](https://airlinepilot.life/)
1. ![](https://www.google.com/s2/favicons?domain=https://airbit.com/) [Airbit](https://airbit.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.airliners.net/) [Airliners](https://www.airliners.net/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.alik.cz/) [Alik.cz](https://www.alik.cz/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.allthingsworn.com) [All Things Worn](https://www.allthingsworn.com) **(NSFW)**
1. ![](https://www.google.com/s2/favicons?domain=https://allmylinks.com/) [AllMyLinks](https://allmylinks.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://aminoapps.com) [Amino](https://aminoapps.com)
1. ![](https://www.google.com/s2/favicons?domain=https://aniworld.to/) [AniWorld](https://aniworld.to/)
1. ![](https://www.google.com/s2/favicons?domain=https://anilist.co/) [Anilist](https://anilist.co/)
1. ![](https://www.google.com/s2/favicons?domain=https://developer.apple.com) [Apple Developer](https://developer.apple.com)
1. ![](https://www.google.com/s2/favicons?domain=https://discussions.apple.com) [Apple Discussions](https://discussions.apple.com)
1. ![](https://www.google.com/s2/favicons?domain=https://archiveofourown.org/) [Archive of Our Own](https://archiveofourown.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://archive.org) [Archive.org](https://archive.org)
1. ![](https://www.google.com/s2/favicons?domain=https://www.artstation.com/) [ArtStation](https://www.artstation.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://asciinema.org) [Asciinema](https://asciinema.org)
1. ![](https://www.google.com/s2/favicons?domain=https://ask.fedoraproject.org/) [Ask Fedora](https://ask.fedoraproject.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://ask.fm/) [AskFM](https://ask.fm/)
1. ![](https://www.google.com/s2/favicons?domain=https://audiojungle.net/) [Audiojungle](https://audiojungle.net/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.autofrage.net/) [Autofrage](https://www.autofrage.net/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.avizo.cz/) [Avizo](https://www.avizo.cz/)
1. ![](https://www.google.com/s2/favicons?domain=https://blip.fm/) [BLIP.fm](https://blip.fm/)
1. ![](https://www.google.com/s2/favicons?domain=https://booth.pm/) [BOOTH](https://booth.pm/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.bandcamp.com/) [Bandcamp](https://www.bandcamp.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.bazar.cz/) [Bazar.cz](https://www.bazar.cz/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.behance.net/) [Behance](https://www.behance.net/)
1. ![](https://www.google.com/s2/favicons?domain=https://bezuzyteczna.pl) [Bezuzyteczna](https://bezuzyteczna.pl)
1. ![](https://www.google.com/s2/favicons?domain=https://www.biggerpockets.com/) [BiggerPockets](https://www.biggerpockets.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.bikemap.net/) [Bikemap](https://www.bikemap.net/)
1. ![](https://www.google.com/s2/favicons?domain=https://forum.dangerousthings.com/) [BioHacking](https://forum.dangerousthings.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://bitbucket.org/) [BitBucket](https://bitbucket.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://bitwarden.com/) [Bitwarden Forum](https://bitwarden.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.blipfoto.com/) [Blipfoto](https://www.blipfoto.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.blogger.com/) [Blogger](https://www.blogger.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://bodyspace.bodybuilding.com/) [BodyBuilding](https://bodyspace.bodybuilding.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://pt.bongacams.com) [BongaCams](https://pt.bongacams.com) **(NSFW)**
1. ![](https://www.google.com/s2/favicons?domain=https://www.bookcrossing.com/) [Bookcrossing](https://www.bookcrossing.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://community.brave.com/) [BraveCommunity](https://community.brave.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://bugcrowd.com/) [BugCrowd](https://bugcrowd.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.buymeacoffee.com/) [BuyMeACoffee](https://www.buymeacoffee.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://buzzfeed.com/) [BuzzFeed](https://buzzfeed.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.cgtrader.com) [CGTrader](https://www.cgtrader.com)
1. ![](https://www.google.com/s2/favicons?domain=https://www.cnet.com/) [CNET](https://www.cnet.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://cssbattle.dev) [CSSBattle](https://cssbattle.dev)
1. ![](https://www.google.com/s2/favicons?domain=https://ctan.org/) [CTAN](https://ctan.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://caddy.community/) [Caddy Community](https://caddy.community/)
1. ![](https://www.google.com/s2/favicons?domain=https://community.cartalk.com/) [Car Talk Community](https://community.cartalk.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://carbonmade.com/) [Carbonmade](https://carbonmade.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://career.habr.com/) [Career.habr](https://career.habr.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.championat.com/) [Championat](https://www.championat.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://chaos.social/) [Chaos](https://chaos.social/)
1. ![](https://www.google.com/s2/favicons?domain=https://chatujme.cz/) [Chatujme.cz](https://chatujme.cz/)
1. ![](https://www.google.com/s2/favicons?domain=https://chaturbate.com) [ChaturBate](https://chaturbate.com) **(NSFW)**
1. ![](https://www.google.com/s2/favicons?domain=https://www.chess.com/) [Chess](https://www.chess.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://choice.community/) [Choice Community](https://choice.community/)
1. ![](https://www.google.com/s2/favicons?domain=https://clapperapp.com/) [Clapper](https://clapperapp.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://community.cloudflare.com/) [CloudflareCommunity](https://community.cloudflare.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.clozemaster.com) [Clozemaster](https://www.clozemaster.com)
1. ![](https://www.google.com/s2/favicons?domain=https://www.clubhouse.com) [Clubhouse](https://www.clubhouse.com)
1. ![](https://www.google.com/s2/favicons?domain=https://codesnippets.fandom.com) [Code Snippet Wiki](https://codesnippets.fandom.com)
1. ![](https://www.google.com/s2/favicons?domain=https://codeberg.org/) [Codeberg](https://codeberg.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.codecademy.com/) [Codecademy](https://www.codecademy.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.codechef.com/) [Codechef](https://www.codechef.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://codeforces.com/) [Codeforces](https://codeforces.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://codepen.io/) [Codepen](https://codepen.io/)
1. ![](https://www.google.com/s2/favicons?domain=https://codersrank.io/) [Coders Rank](https://codersrank.io/)
1. ![](https://www.google.com/s2/favicons?domain=https://coderwall.com) [Coderwall](https://coderwall.com)
1. ![](https://www.google.com/s2/favicons?domain=https://www.codewars.com) [Codewars](https://www.codewars.com)
1. ![](https://www.google.com/s2/favicons?domain=https://coinvote.cc/) [Coinvote](https://coinvote.cc/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.colourlovers.com/) [ColourLovers](https://www.colourlovers.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://contently.com/) [Contently](https://contently.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://coroflot.com/) [Coroflot](https://coroflot.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.cracked.com/) [Cracked](https://www.cracked.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://crevado.com/) [Crevado](https://crevado.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://crowdin.com/) [Crowdin](https://crowdin.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://community.cryptomator.org/) [Cryptomator Forum](https://community.cryptomator.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://cults3d.com/en) [Cults3D](https://cults3d.com/en)
1. ![](https://www.google.com/s2/favicons?domain=https://cyberdefenders.org/) [CyberDefenders](https://cyberdefenders.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://dev.to/) [DEV Community](https://dev.to/)
1. ![](https://www.google.com/s2/favicons?domain=https://dmoj.ca/) [DMOJ](https://dmoj.ca/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.dailymotion.com/) [DailyMotion](https://www.dailymotion.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.dealabs.com/) [Dealabs](https://www.dealabs.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://deviantart.com) [DeviantART](https://deviantart.com)
1. ![](https://www.google.com/s2/favicons?domain=https://www.discogs.com/) [Discogs](https://www.discogs.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://discuss.elastic.co/) [Discuss.Elastic.co](https://discuss.elastic.co/)
1. ![](https://www.google.com/s2/favicons?domain=https://disqus.com/) [Disqus](https://disqus.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://hub.docker.com/) [Docker Hub](https://hub.docker.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://dribbble.com/) [Dribbble](https://dribbble.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://duolingo.com/) [Duolingo](https://duolingo.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://community.eintracht.de/) [Eintracht Frankfurt Forum](https://community.eintracht.de/)
1. ![](https://www.google.com/s2/favicons?domain=https://forums.envato.com/) [Envato Forum](https://forums.envato.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.erome.com/) [Erome](https://www.erome.com/) **(NSFW)**
1. ![](https://www.google.com/s2/favicons?domain=https://www.etsy.com/) [Etsy](https://www.etsy.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://euw.op.gg/) [Euw](https://euw.op.gg/)
1. ![](https://www.google.com/s2/favicons?domain=https://exposure.co/) [Exposure](https://exposure.co/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.eyeem.com/) [EyeEm](https://www.eyeem.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://f3.cool/) [F3.cool](https://f3.cool/)
1. ![](https://www.google.com/s2/favicons?domain=https://fameswap.com/) [Fameswap](https://fameswap.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.fandom.com/) [Fandom](https://www.fandom.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.finanzfrage.net/) [Finanzfrage](https://www.finanzfrage.net/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.fiverr.com/) [Fiverr](https://www.fiverr.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.flickr.com/) [Flickr](https://www.flickr.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.flightradar24.com/) [Flightradar24](https://www.flightradar24.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://flipboard.com/) [Flipboard](https://flipboard.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.rusfootball.info/) [Football](https://www.rusfootball.info/)
1. ![](https://www.google.com/s2/favicons?domain=https://fortnitetracker.com/challenges) [FortniteTracker](https://fortnitetracker.com/challenges)
1. ![](https://www.google.com/s2/favicons?domain=https://www.forumophilia.com/) [Forum Ophilia](https://www.forumophilia.com/) **(NSFW)**
1. ![](https://www.google.com/s2/favicons?domain=https://fosstodon.org/) [Fosstodon](https://fosstodon.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://freelance.habr.com/) [Freelance.habr](https://freelance.habr.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.freelancer.com/) [Freelancer](https://www.freelancer.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://freesound.org/) [Freesound](https://freesound.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://gitlab.gnome.org/) [GNOME VCS](https://gitlab.gnome.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.gaiaonline.com/) [GaiaOnline](https://www.gaiaonline.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.gamespot.com/) [Gamespot](https://www.gamespot.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.geeksforgeeks.org/) [GeeksforGeeks](https://www.geeksforgeeks.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://genius.com/) [Genius (Artists)](https://genius.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://genius.com/) [Genius (Users)](https://genius.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.gesundheitsfrage.net/) [Gesundheitsfrage](https://www.gesundheitsfrage.net/)
1. ![](https://www.google.com/s2/favicons?domain=https://getmyuni.com/) [GetMyUni](https://getmyuni.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.giantbomb.com/) [Giant Bomb](https://www.giantbomb.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://giphy.com/) [Giphy](https://giphy.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://gitbook.com/) [GitBook](https://gitbook.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.github.com/) [GitHub](https://www.github.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://gitlab.com/) [GitLab](https://gitlab.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://gitee.com/) [Gitee](https://gitee.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.goodreads.com/) [GoodReads](https://www.goodreads.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://play.google.com) [Google Play](https://play.google.com)
1. ![](https://www.google.com/s2/favicons?domain=https://gradle.org/) [Gradle](https://gradle.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.grailed.com/) [Grailed](https://www.grailed.com/)
1. ![](https://www.google.com/s2/favicons?domain=http://en.gravatar.com/) [Gravatar](http://en.gravatar.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.gumroad.com/) [Gumroad](https://www.gumroad.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.gutefrage.net/) [Gutefrage](https://www.gutefrage.net/)
1. ![](https://www.google.com/s2/favicons?domain=https://forum.hackthebox.eu/) [HackTheBox](https://forum.hackthebox.eu/)
1. ![](https://www.google.com/s2/favicons?domain=https://hackaday.io/) [Hackaday](https://hackaday.io/)
1. ![](https://www.google.com/s2/favicons?domain=https://hackenproof.com/) [HackenProof (Hackers)](https://hackenproof.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://hackerearth.com/) [HackerEarth](https://hackerearth.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://news.ycombinator.com/) [HackerNews](https://news.ycombinator.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://hackerone.com/) [HackerOne](https://hackerone.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://hackerrank.com/) [HackerRank](https://hackerrank.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://scholar.harvard.edu/) [Harvard Scholar](https://scholar.harvard.edu/)
1. ![](https://www.google.com/s2/favicons?domain=https://hashnode.com) [Hashnode](https://hashnode.com)
1. ![](https://www.google.com/s2/favicons?domain=https://www.heavy-r.com/) [Heavy-R](https://www.heavy-r.com/) **(NSFW)**
1. ![](https://www.google.com/s2/favicons?domain=https://holopin.io) [Holopin](https://holopin.io)
1. ![](https://www.google.com/s2/favicons?domain=https://houzz.com/) [Houzz](https://houzz.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://hubpages.com/) [HubPages](https://hubpages.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://hubski.com/) [Hubski](https://hubski.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://hudsonrock.com) [HudsonRock](https://hudsonrock.com)
1. ![](https://www.google.com/s2/favicons?domain=https://icq.com/) [ICQ](https://icq.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.ifttt.com/) [IFTTT](https://www.ifttt.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://irc-galleria.net/) [IRC-Galleria](https://irc-galleria.net/)
1. ![](https://www.google.com/s2/favicons?domain=https://community.icons8.com/) [Icons8 Community](https://community.icons8.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.imagefap.com/) [Image Fap](https://www.imagefap.com/) **(NSFW)**
1. ![](https://www.google.com/s2/favicons?domain=https://imgup.cz/) [ImgUp.cz](https://imgup.cz/)
1. ![](https://www.google.com/s2/favicons?domain=https://imgur.com/) [Imgur](https://imgur.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://instagram.com/) [Instagram](https://instagram.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.instructables.com/) [Instructables](https://www.instructables.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://app.intigriti.com) [Intigriti](https://app.intigriti.com)
1. ![](https://www.google.com/s2/favicons?domain=https://forum.ionicframework.com/) [Ionic Forum](https://forum.ionicframework.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://issuu.com/) [Issuu](https://issuu.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://itch.io/) [Itch.io](https://itch.io/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.itemfix.com/) [Itemfix](https://www.itemfix.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://translate.jellyfin.org/) [Jellyfin Weblate](https://translate.jellyfin.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://jimdosite.com/) [Jimdo](https://jimdosite.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://discourse.joplinapp.org/) [Joplin Forum](https://discourse.joplinapp.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.keakr.com/) [KEAKR](https://www.keakr.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.kaggle.com/) [Kaggle](https://www.kaggle.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://keybase.io/) [Keybase](https://keybase.io/)
1. ![](https://www.google.com/s2/favicons?domain=https://kick.com/) [Kick](https://kick.com/)
1. ![](https://www.google.com/s2/favicons?domain=http://kik.me/) [Kik](http://kik.me/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.kongregate.com/) [Kongregate](https://www.kongregate.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://linux.org.ru/) [LOR](https://linux.org.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://launchpad.net/) [Launchpad](https://launchpad.net/)
1. ![](https://www.google.com/s2/favicons?domain=https://leetcode.com/) [LeetCode](https://leetcode.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.lesswrong.com/) [LessWrong](https://www.lesswrong.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://letterboxd.com/) [Letterboxd](https://letterboxd.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.librarything.com/) [LibraryThing](https://www.librarything.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://lichess.org) [Lichess](https://lichess.org)
1. ![](https://www.google.com/s2/favicons?domain=https://linkedin.com) [LinkedIn](https://linkedin.com)
1. ![](https://www.google.com/s2/favicons?domain=https://linktr.ee/) [Linktree](https://linktr.ee/)
1. ![](https://www.google.com/s2/favicons?domain=https://listed.to/) [Listed](https://listed.to/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.livejournal.com/) [LiveJournal](https://www.livejournal.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://lobste.rs/) [Lobsters](https://lobste.rs/)
1. ![](https://www.google.com/s2/favicons?domain=https://lottiefiles.com/) [LottieFiles](https://lottiefiles.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.lushstories.com/) [LushStories](https://www.lushstories.com/) **(NSFW)**
1. ![](https://www.google.com/s2/favicons?domain=https://forums.mmorpg.com/) [MMORPG Forum](https://forums.mmorpg.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://mapify.travel/) [Mapify](https://mapify.travel/)
1. ![](https://www.google.com/s2/favicons?domain=https://medium.com/) [Medium](https://medium.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.memrise.com/) [Memrise](https://www.memrise.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://minecraft.net/) [Minecraft](https://minecraft.net/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.mixcloud.com/) [MixCloud](https://www.mixcloud.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://monkeytype.com/) [Monkeytype](https://monkeytype.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://motherless.com/) [Motherless](https://motherless.com/) **(NSFW)**
1. ![](https://www.google.com/s2/favicons?domain=https://www.motorradfrage.net/) [Motorradfrage](https://www.motorradfrage.net/)
1. ![](https://www.google.com/s2/favicons?domain=https://myanimelist.net/) [MyAnimeList](https://myanimelist.net/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.myminifactory.com/) [MyMiniFactory](https://www.myminifactory.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://mydramalist.com) [Mydramalist](https://mydramalist.com)
1. ![](https://www.google.com/s2/favicons?domain=https://myspace.com/) [Myspace](https://myspace.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.native-instruments.com/forum/) [NICommunityForum](https://www.native-instruments.com/forum/)
1. ![](https://www.google.com/s2/favicons?domain=https://nationstates.net) [NationStates Nation](https://nationstates.net)
1. ![](https://www.google.com/s2/favicons?domain=https://nationstates.net) [NationStates Region](https://nationstates.net)
1. ![](https://www.google.com/s2/favicons?domain=https://naver.com) [Naver](https://naver.com)
1. ![](https://www.google.com/s2/favicons?domain=https://www.needrom.com/) [Needrom](https://www.needrom.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://newgrounds.com) [Newgrounds](https://newgrounds.com)
1. ![](https://www.google.com/s2/favicons?domain=https://nextcloud.com/) [Nextcloud Forum](https://nextcloud.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://nightbot.tv/) [Nightbot](https://nightbot.tv/)
1. ![](https://www.google.com/s2/favicons?domain=https://ninjakiwi.com/) [Ninja Kiwi](https://ninjakiwi.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.nintendolife.com/) [NintendoLife](https://www.nintendolife.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.nitrotype.com/) [NitroType](https://www.nitrotype.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://notabug.org/) [NotABug.org](https://notabug.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://nyaa.si/) [Nyaa.si](https://nyaa.si/)
1. ![](https://www.google.com/s2/favicons?domain=https://ogu.gg/) [OGUsers](https://ogu.gg/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.openstreetmap.org/) [OpenStreetMap](https://www.openstreetmap.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://opensource.com/) [Opensource](https://opensource.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://ourdjtalk.com/) [OurDJTalk](https://ourdjtalk.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://pcgamer.com) [PCGamer](https://pcgamer.com)
1. ![](https://www.google.com/s2/favicons?domain=https://psnprofiles.com/) [PSNProfiles.com](https://psnprofiles.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://packagist.org/) [Packagist](https://packagist.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://pastebin.com/) [Pastebin](https://pastebin.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.patreon.com/) [Patreon](https://www.patreon.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://pentesterlab.com/) [PentesterLab](https://pentesterlab.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.pepper.it) [PepperIT](https://www.pepper.it)
1. ![](https://www.google.com/s2/favicons?domain=https://www.periscope.tv/) [Periscope](https://www.periscope.tv/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.pinkbike.com/) [Pinkbike](https://www.pinkbike.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://play.google.com/store) [PlayStore](https://play.google.com/store)
1. ![](https://www.google.com/s2/favicons?domain=https://pocketstars.com/) [PocketStars](https://pocketstars.com/) **(NSFW)**
1. ![](https://www.google.com/s2/favicons?domain=https://pokemonshowdown.com) [Pokemon Showdown](https://pokemonshowdown.com)
1. ![](https://www.google.com/s2/favicons?domain=https://polarsteps.com/) [Polarsteps](https://polarsteps.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.polygon.com/) [Polygon](https://www.polygon.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://polymart.org/) [Polymart](https://polymart.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://pornhub.com/) [Pornhub](https://pornhub.com/) **(NSFW)**
1. ![](https://www.google.com/s2/favicons?domain=https://www.producthunt.com/) [ProductHunt](https://www.producthunt.com/)
1. ![](https://www.google.com/s2/favicons?domain=http://promodj.com/) [PromoDJ](http://promodj.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://pypi.org) [PyPi](https://pypi.org)
1. ![](https://www.google.com/s2/favicons?domain=https://www.rajce.idnes.cz/) [Rajce.net](https://www.rajce.idnes.cz/)
1. ![](https://www.google.com/s2/favicons?domain=https://rateyourmusic.com/) [Rate Your Music](https://rateyourmusic.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://forum.rclone.org/) [Rclone Forum](https://forum.rclone.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.redtube.com/) [RedTube](https://www.redtube.com/) **(NSFW)**
1. ![](https://www.google.com/s2/favicons?domain=https://www.redbubble.com/) [Redbubble](https://www.redbubble.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.reddit.com/) [Reddit](https://www.reddit.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.reisefrage.net/) [Reisefrage](https://www.reisefrage.net/)
1. ![](https://www.google.com/s2/favicons?domain=https://replit.com/) [Replit.com](https://replit.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.researchgate.net/) [ResearchGate](https://www.researchgate.net/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.reverbnation.com/) [ReverbNation](https://www.reverbnation.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.roblox.com/) [Roblox](https://www.roblox.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.rockettube.com/) [RocketTube](https://www.rockettube.com/) **(NSFW)**
1. ![](https://www.google.com/s2/favicons?domain=https://royalcams.com) [RoyalCams](https://royalcams.com)
1. ![](https://www.google.com/s2/favicons?domain=https://rubygems.org/) [RubyGems](https://rubygems.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://rumble.com/) [Rumble](https://rumble.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.runescape.com/) [RuneScape](https://www.runescape.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://swapd.co/) [SWAPD](https://swapd.co/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.sbazar.cz/) [Sbazar.cz](https://www.sbazar.cz/)
1. ![](https://www.google.com/s2/favicons?domain=https://scratch.mit.edu/) [Scratch](https://scratch.mit.edu/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.scribd.com/) [Scribd](https://www.scribd.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.shitpostbot.com/) [ShitpostBot5000](https://www.shitpostbot.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.shpock.com/) [Shpock](https://www.shpock.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://community.signalusers.org) [Signal](https://community.signalusers.org)
1. ![](https://www.google.com/s2/favicons?domain=https://sketchfab.com/) [Sketchfab](https://sketchfab.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://slack.com) [Slack](https://slack.com)
1. ![](https://www.google.com/s2/favicons?domain=https://www.slant.co/) [Slant](https://www.slant.co/)
1. ![](https://www.google.com/s2/favicons?domain=https://slashdot.org) [Slashdot](https://slashdot.org)
1. ![](https://www.google.com/s2/favicons?domain=https://slideshare.net/) [SlideShare](https://slideshare.net/)
1. ![](https://www.google.com/s2/favicons?domain=https://slides.com/) [Slides](https://slides.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://smugmug.com) [SmugMug](https://smugmug.com)
1. ![](https://www.google.com/s2/favicons?domain=https://www.smule.com/) [Smule](https://www.smule.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.snapchat.com) [Snapchat](https://www.snapchat.com)
1. ![](https://www.google.com/s2/favicons?domain=https://soundcloud.com/) [SoundCloud](https://soundcloud.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://sourceforge.net/) [SourceForge](https://sourceforge.net/)
1. ![](https://www.google.com/s2/favicons?domain=https://soylentnews.org) [SoylentNews](https://soylentnews.org)
1. ![](https://www.google.com/s2/favicons?domain=https://speedrun.com/) [Speedrun.com](https://speedrun.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://spells8.com) [Spells8](https://spells8.com)
1. ![](https://www.google.com/s2/favicons?domain=https://splice.com/) [Splice](https://splice.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://splits.io) [Splits.io](https://splits.io)
1. ![](https://www.google.com/s2/favicons?domain=https://www.sporcle.com/) [Sporcle](https://www.sporcle.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.sportlerfrage.net/) [Sportlerfrage](https://www.sportlerfrage.net/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.sports.ru/) [SportsRU](https://www.sports.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://open.spotify.com/) [Spotify](https://open.spotify.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://robertsspaceindustries.com/) [Star Citizen](https://robertsspaceindustries.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://steamcommunity.com/) [Steam Community (Group)](https://steamcommunity.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://steamcommunity.com/) [Steam Community (User)](https://steamcommunity.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.strava.com/) [Strava](https://www.strava.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://forum.sublimetext.com/) [SublimeForum](https://forum.sublimetext.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://tetr.io) [TETR.IO](https://tetr.io)
1. ![](https://www.google.com/s2/favicons?domain=https://tldrlegal.com/) [TLDR Legal](https://tldrlegal.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://traktrain.com/) [TRAKTRAIN](https://traktrain.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://t.me/) [Telegram](https://t.me/)
1. ![](https://www.google.com/s2/favicons?domain=https://tellonym.me/) [Tellonym.me](https://tellonym.me/)
1. ![](https://www.google.com/s2/favicons?domain=https://tenor.com/) [Tenor](https://tenor.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://themeforest.net/) [ThemeForest](https://themeforest.net/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.tnaflix.com/) [TnAFlix](https://www.tnaflix.com/) **(NSFW)**
1. ![](https://www.google.com/s2/favicons?domain=https://torrentgalaxy.to/) [TorrentGalaxy](https://torrentgalaxy.to/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.tradingview.com/) [TradingView](https://www.tradingview.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.trakt.tv/) [Trakt](https://www.trakt.tv/)
1. ![](https://www.google.com/s2/favicons?domain=https://trashbox.ru/) [TrashboxRU](https://trashbox.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://traewelling.de/) [Trawelling](https://traewelling.de/)
1. ![](https://www.google.com/s2/favicons?domain=https://trello.com/) [Trello](https://trello.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://tryhackme.com/) [TryHackMe](https://tryhackme.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://tuna.voicemod.net/) [Tuna](https://tuna.voicemod.net/)
1. ![](https://www.google.com/s2/favicons?domain=https://tweakers.net) [Tweakers](https://tweakers.net)
1. ![](https://www.google.com/s2/favicons?domain=https://www.twitch.tv/) [Twitch](https://www.twitch.tv/)
1. ![](https://www.google.com/s2/favicons?domain=https://twitter.com/) [Twitter](https://twitter.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://typeracer.com) [Typeracer](https://typeracer.com)
1. ![](https://www.google.com/s2/favicons?domain=https://ultimate-guitar.com/) [Ultimate-Guitar](https://ultimate-guitar.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://unsplash.com/) [Unsplash](https://unsplash.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://untappd.com/) [Untappd](https://untappd.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://vk.com/) [VK](https://vk.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://vsco.co/) [VSCO](https://vsco.co/)
1. ![](https://www.google.com/s2/favicons?domain=https://forum.velomania.ru/) [Velomania](https://forum.velomania.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://venmo.com/) [Venmo](https://venmo.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://vero.co/) [Vero](https://vero.co/)
1. ![](https://www.google.com/s2/favicons?domain=https://vimeo.com/) [Vimeo](https://vimeo.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.virustotal.com/) [VirusTotal](https://www.virustotal.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://discourse.wicg.io/) [WICG Forum](https://discourse.wicg.io/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.warriorforum.com/) [Warrior Forum](https://www.warriorforum.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.wattpad.com/) [Wattpad](https://www.wattpad.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.webnode.cz/) [WebNode](https://www.webnode.cz/)
1. ![](https://www.google.com/s2/favicons?domain=https://hosted.weblate.org/) [Weblate](https://hosted.weblate.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://weebly.com/) [Weebly](https://weebly.com/)
1. ![](https://www.google.com/s2/favicons?domain=http://www.wikidot.com/) [Wikidot](http://www.wikidot.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.wikipedia.org/) [Wikipedia](https://www.wikipedia.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://windy.com/) [Windy](https://windy.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://wix.com/) [Wix](https://wix.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://community.wolfram.com/) [WolframalphaForum](https://community.wolfram.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://wordpress.com) [WordPress](https://wordpress.com)
1. ![](https://www.google.com/s2/favicons?domain=https://wordpress.org/) [WordPressOrg](https://wordpress.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.wordnik.com/) [Wordnik](https://www.wordnik.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.wykop.pl) [Wykop](https://www.wykop.pl)
1. ![](https://www.google.com/s2/favicons?domain=https://xboxgamertag.com/) [Xbox Gamertag](https://xboxgamertag.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://xvideos.com/) [Xvideos](https://xvideos.com/) **(NSFW)**
1. ![](https://www.google.com/s2/favicons?domain=https://music.yandex) [YandexMusic](https://music.yandex)
1. ![](https://www.google.com/s2/favicons?domain=https://www.younow.com/) [YouNow](https://www.younow.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://youpic.com/) [YouPic](https://youpic.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://youporn.com) [YouPorn](https://youporn.com) **(NSFW)**
1. ![](https://www.google.com/s2/favicons?domain=https://www.youtube.com/) [YouTube](https://www.youtube.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.zhihu.com/) [Zhihu](https://www.zhihu.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://akniga.org/profile/blue/) [akniga](https://akniga.org/profile/blue/)
1. ![](https://www.google.com/s2/favicons?domain=http://www.authorstream.com/) [authorSTREAM](http://www.authorstream.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.baby.ru/) [babyRU](https://www.baby.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.babyblog.ru/) [babyblogRU](https://www.babyblog.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://chaos.social/) [chaos.social](https://chaos.social/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.couchsurfing.com/) [couchsurfing](https://www.couchsurfing.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://d3.ru/) [d3RU](https://d3.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.dailykos.com) [dailykos](https://www.dailykos.com)
1. ![](https://www.google.com/s2/favicons?domain=http://dating.ru) [datingRU](http://dating.ru)
1. ![](https://www.google.com/s2/favicons?domain=https://devrant.com/) [devRant](https://devrant.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.drive2.ru/) [drive2](https://www.drive2.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://egpu.io/) [eGPU](https://egpu.io/)
1. ![](https://www.google.com/s2/favicons?domain=https://eintracht.de) [eintracht](https://eintracht.de)
1. ![](https://www.google.com/s2/favicons?domain=https://www.fixya.com) [fixya](https://www.fixya.com)
1. ![](https://www.google.com/s2/favicons?domain=https://www.fl.ru/) [fl](https://www.fl.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://forum.guns.ru/) [forum_guns](https://forum.guns.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.freecodecamp.org/) [freecodecamp](https://www.freecodecamp.org/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.furaffinity.net) [furaffinity](https://www.furaffinity.net)
1. ![](https://www.google.com/s2/favicons?domain=https://www.geocaching.com/) [geocaching](https://www.geocaching.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://gfycat.com/) [gfycat](https://gfycat.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://habr.com/) [habr](https://habr.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.hackster.io) [hackster](https://www.hackster.io)
1. ![](https://www.google.com/s2/favicons?domain=https://www.hunting.ru/forum/) [hunting](https://www.hunting.ru/forum/)
1. ![](https://www.google.com/s2/favicons?domain=https://imgsrc.ru/) [iMGSRC.RU](https://imgsrc.ru/)
1. ![](https://www.google.com/s2/favicons?domain=http://forum.igromania.ru/) [igromania](http://forum.igromania.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.interpals.net/) [interpals](https://www.interpals.net/)
1. ![](https://www.google.com/s2/favicons?domain=https://irecommend.ru/) [irecommend](https://irecommend.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://jbzd.com.pl/) [jbzd.com.pl](https://jbzd.com.pl/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.jeuxvideo.com) [jeuxvideo](https://www.jeuxvideo.com)
1. ![](https://www.google.com/s2/favicons?domain=https://ko-fi.com) [kofi](https://ko-fi.com)
1. ![](https://www.google.com/s2/favicons?domain=https://www.kwork.ru/) [kwork](https://www.kwork.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://lab.pentestit.ru/) [labpentestit](https://lab.pentestit.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://last.fm/) [last.fm](https://last.fm/)
1. ![](https://www.google.com/s2/favicons?domain=https://forum.leasehackr.com/) [leasehackr](https://forum.leasehackr.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.livelib.ru/) [livelib](https://www.livelib.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://mastodon.cloud/) [mastodon.cloud](https://mastodon.cloud/)
1. ![](https://www.google.com/s2/favicons?domain=https://chaos.social/) [mastodon.social](https://chaos.social/)
1. ![](https://www.google.com/s2/favicons?domain=https://mastodon.xyz/) [mastodon.technology](https://mastodon.xyz/)
1. ![](https://www.google.com/s2/favicons?domain=https://mastodon.xyz/) [mastodon.xyz](https://mastodon.xyz/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.mercadolivre.com.br) [mercadolivre](https://www.mercadolivre.com.br)
1. ![](https://www.google.com/s2/favicons?domain=https://www.minds.com) [minds](https://www.minds.com)
1. ![](https://www.google.com/s2/favicons?domain=https://moikrug.ru/) [moikrug](https://moikrug.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://mstdn.io/) [mstdn.io](https://mstdn.io/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.nairaland.com/) [nairaland.com](https://www.nairaland.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.nn.ru/) [nnRU](https://www.nn.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://note.com/) [note](https://note.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.npmjs.com/) [npm](https://www.npmjs.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.opennet.ru/) [opennet](https://www.opennet.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://osu.ppy.sh/) [osu!](https://osu.ppy.sh/)
1. ![](https://www.google.com/s2/favicons?domain=https://php.ru/forum/) [phpRU](https://php.ru/forum/)
1. ![](https://www.google.com/s2/favicons?domain=https://pikabu.ru/) [pikabu](https://pikabu.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://pr0gramm.com/) [pr0gramm](https://pr0gramm.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://prog.hu/) [prog.hu](https://prog.hu/)
1. ![](https://www.google.com/s2/favicons?domain=https://queer.af/) [queer.af](https://queer.af/)
1. ![](https://www.google.com/s2/favicons?domain=https://satsis.info/) [satsisRU](https://satsis.info/)
1. ![](https://www.google.com/s2/favicons?domain=https://sessionize.com/) [sessionize](https://sessionize.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://skyrock.com/) [skyrock](https://skyrock.com/)
1. ![](https://www.google.com/s2/favicons?domain=https://social.tchncs.de/) [social.tchncs.de](https://social.tchncs.de/)
1. ![](https://www.google.com/s2/favicons?domain=https://spletnik.ru/) [spletnik](https://spletnik.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.svidbook.ru/) [svidbook](https://www.svidbook.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://www.toster.ru/) [toster](https://www.toster.ru/)
1. ![](https://www.google.com/s2/favicons?domain=https://uid.me/) [uid](https://uid.me/)
1. ![](https://www.google.com/s2/favicons?domain=https://wiki.vg/) [wiki.vg](https://wiki.vg/)
1. ![](https://www.google.com/s2/favicons?domain=https://xhamster.com) [xHamster](https://xhamster.com) **(NSFW)**
1. ![](https://www.google.com/s2/favicons?domain=https://znanylekarz.pl) [znanylekarz.pl](https://znanylekarz.pl)

View File

@ -1,51 +0,0 @@
import os
import json
import urllib.request  # urlopen lives in urllib.request, so import the submodule explicitly
import pytest
from sherlock_project.sites import SitesInformation


def fetch_local_manifest(honor_exclusions: bool = True) -> dict[str, dict[str, str]]:
    sites_obj = SitesInformation(data_file_path=os.path.join(os.path.dirname(__file__), "../sherlock_project/resources/data.json"), honor_exclusions=honor_exclusions)
    sites_iterable: dict[str, dict[str, str]] = {site.name: site.information for site in sites_obj}
    return sites_iterable


@pytest.fixture()
def sites_obj():
    sites_obj = SitesInformation(data_file_path=os.path.join(os.path.dirname(__file__), "../sherlock_project/resources/data.json"))
    yield sites_obj


@pytest.fixture(scope="session")
def sites_info():
    yield fetch_local_manifest()


@pytest.fixture(scope="session")
def remote_schema():
    schema_url: str = 'https://raw.githubusercontent.com/sherlock-project/sherlock/master/sherlock_project/resources/data.schema.json'
    with urllib.request.urlopen(schema_url) as remoteschema:
        schemadat = json.load(remoteschema)
    yield schemadat


def pytest_addoption(parser):
    parser.addoption(
        "--chunked-sites",
        action="store",
        default=None,
        help="For tests utilizing chunked sites, include only the (comma-separated) site(s) specified.",
    )


def pytest_generate_tests(metafunc):
    if "chunked_sites" in metafunc.fixturenames:
        sites_info = fetch_local_manifest(honor_exclusions=False)

        # Ingest and apply site selections
        site_filter: str | None = metafunc.config.getoption("--chunked-sites")
        if site_filter:
            selected_sites: list[str] = [site.strip() for site in site_filter.split(",")]
            sites_info = {
                site: data for site, data in sites_info.items()
                if site in selected_sites
            }

        params = [{name: data} for name, data in sites_info.items()]
        ids = list(sites_info.keys())
        metafunc.parametrize("chunked_sites", params, ids=ids)

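For orientation, the `pytest_generate_tests` hook above turns every manifest entry into its own `chunked_sites` test case, and `--chunked-sites` narrows that expansion to a comma-separated allow-list. A minimal sketch of driving such a filtered run programmatically (the marker name and the `GitHub,GitLab` selection are illustrative assumptions, not fixed project values):

```python
# Hypothetical illustration: run only the target-validation tests for two sites.
# Roughly equivalent to: pytest -m validate_targets --chunked-sites="GitHub,GitLab" -v
import sys

import pytest

exit_code = pytest.main([
    "-m", "validate_targets",          # marker applied to Test_All_Targets further below
    "--chunked-sites=GitHub,GitLab",   # comma-separated filter parsed by pytest_generate_tests
    "-v",
])
sys.exit(exit_code)
```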
View File

@ -1,7 +0,0 @@
import sherlock_project

#from sherlock.sites import SitesInformation
#local_manifest = data_file_path=os.path.join(os.path.dirname(__file__), "../sherlock/resources/data.json")

def test_username_via_message():
    sherlock_project.__main__("--version")

View File

@ -1,38 +0,0 @@
import os
import platform
import re
import subprocess


class Interactives:
    def run_cli(args:str = "") -> str:
        """Pass arguments to Sherlock as a normal user on the command line"""
        # Adapt for platform differences (Windows likes to be special)
        if platform.system() == "Windows":
            command:str = f"py -m sherlock_project {args}"
        else:
            command:str = f"sherlock {args}"

        proc_out:str = ""
        try:
            proc_out = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)
            return proc_out.decode()
        except subprocess.CalledProcessError as e:
            raise InteractivesSubprocessError(e.output.decode())

    def walk_sherlock_for_files_with(pattern: str) -> list[str]:
        """Check all files within the Sherlock package for matching patterns"""
        pattern:re.Pattern = re.compile(pattern)
        matching_files:list[str] = []
        for root, dirs, files in os.walk("sherlock_project"):
            for file in files:
                file_path = os.path.join(root, file)
                if "__pycache__" in file_path:
                    continue
                with open(file_path, 'r', errors='ignore') as f:
                    if pattern.search(f.read()):
                        matching_files.append(file_path)
        return matching_files


class InteractivesSubprocessError(Exception):
    pass

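As a rough usage sketch of the helper above (it assumes the `sherlock` console script, or `py -m sherlock_project` on Windows, is already installed and on PATH):

```python
from sherlock_interactives import Interactives, InteractivesSubprocessError

try:
    # Runs the CLI exactly as a user would and returns its combined stdout/stderr.
    print(Interactives.run_cli("--version"))
except InteractivesSubprocessError as err:
    # Non-zero exit codes are surfaced with the captured output attached.
    print(f"sherlock exited with an error: {err}")

# Lists package files whose source assigns __version__ anywhere.
print(Interactives.walk_sherlock_for_files_with(r"__version__ *= *"))
```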
View File

@ -1,39 +0,0 @@
import os
import json
import pytest
from jsonschema import validate


def test_validate_manifest_against_local_schema():
    """Ensures that the manifest matches the local schema, for situations where the schema is being changed."""
    json_relative: str = '../sherlock_project/resources/data.json'
    schema_relative: str = '../sherlock_project/resources/data.schema.json'

    json_path: str = os.path.join(os.path.dirname(__file__), json_relative)
    schema_path: str = os.path.join(os.path.dirname(__file__), schema_relative)

    with open(json_path, 'r') as f:
        jsondat = json.load(f)
    with open(schema_path, 'r') as f:
        schemadat = json.load(f)

    validate(instance=jsondat, schema=schemadat)


@pytest.mark.online
def test_validate_manifest_against_remote_schema(remote_schema):
    """Ensures that the manifest matches the remote schema, so as to not unexpectedly break clients."""
    json_relative: str = '../sherlock_project/resources/data.json'
    json_path: str = os.path.join(os.path.dirname(__file__), json_relative)

    with open(json_path, 'r') as f:
        jsondat = json.load(f)

    validate(instance=jsondat, schema=remote_schema)


# Ensure that the expected values are being returned by the site list
@pytest.mark.parametrize("target_name,target_expected_err_type", [
    ('GitHub', 'status_code'),
    ('GitLab', 'message'),
])
def test_site_list_iterability(sites_info, target_name, target_expected_err_type):
    assert sites_info[target_name]['errorType'] == target_expected_err_type

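To make the validation step concrete, here is a self-contained sketch against a single hand-written entry; the schema fragment and the `ExampleSite` record are illustrative only and deliberately simpler than the project's real `data.schema.json`:

```python
import json

from jsonschema import ValidationError, validate

# Illustrative schema fragment: every value must be an object carrying these keys.
schema = {
    "type": "object",
    "additionalProperties": {
        "type": "object",
        "required": ["url", "urlMain", "errorType", "username_claimed"],
    },
}

# Illustrative manifest entry in the same shape as entries in data.json.
entry = json.loads("""
{
    "ExampleSite": {
        "url": "https://example.com/{}",
        "urlMain": "https://example.com/",
        "errorType": "status_code",
        "username_claimed": "blue"
    }
}
""")

try:
    validate(instance=entry, schema=schema)
    print("entry matches schema")
except ValidationError as exc:
    print(f"manifest entry rejected: {exc.message}")
```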
View File

@ -1,105 +0,0 @@
import pytest
import random
import string
import re

from sherlock_project.sherlock import sherlock
from sherlock_project.notify import QueryNotify
from sherlock_project.result import QueryStatus
#from sherlock_interactives import Interactives


def simple_query(sites_info: dict, site: str, username: str) -> QueryStatus:
    query_notify = QueryNotify()
    site_data: dict = {}
    site_data[site] = sites_info[site]
    return sherlock(
        username=username,
        site_data=site_data,
        query_notify=query_notify,
    )[site]['status'].status


@pytest.mark.online
class TestLiveTargets:
    """Actively test probes against live and trusted targets"""

    # Known positives should only use sites trusted to be reliable and unchanging
    @pytest.mark.parametrize('site,username', [
        ('GitLab', 'ppfeister'),
        ('AllMyLinks', 'blue'),
    ])
    def test_known_positives_via_message(self, sites_info, site, username):
        assert simple_query(sites_info=sites_info, site=site, username=username) is QueryStatus.CLAIMED

    # Known positives should only use sites trusted to be reliable and unchanging
    @pytest.mark.parametrize('site,username', [
        ('GitHub', 'ppfeister'),
        ('GitHub', 'sherlock-project'),
        ('Docker Hub', 'ppfeister'),
        ('Docker Hub', 'sherlock'),
    ])
    def test_known_positives_via_status_code(self, sites_info, site, username):
        assert simple_query(sites_info=sites_info, site=site, username=username) is QueryStatus.CLAIMED

    # Known positives should only use sites trusted to be reliable and unchanging
    @pytest.mark.parametrize('site,username', [
        ('Keybase', 'blue'),
        ('devRant', 'blue'),
    ])
    def test_known_positives_via_response_url(self, sites_info, site, username):
        assert simple_query(sites_info=sites_info, site=site, username=username) is QueryStatus.CLAIMED

    # Randomly generate usernames of high length and test for positive availability
    # Randomly generated usernames should be simple alnum for simplicity and high
    # compatibility. Several attempts may be made ~just in case~ a real username is
    # generated.
    @pytest.mark.parametrize('site,random_len', [
        ('GitLab', 255),
        ('Codecademy', 30),
    ])
    def test_likely_negatives_via_message(self, sites_info, site, random_len):
        num_attempts: int = 3
        attempted_usernames: list[str] = []
        status: QueryStatus = QueryStatus.CLAIMED
        for i in range(num_attempts):
            acceptable_types = string.ascii_letters + string.digits
            random_handle = ''.join(random.choice(acceptable_types) for _ in range(random_len))
            attempted_usernames.append(random_handle)
            status = simple_query(sites_info=sites_info, site=site, username=random_handle)
            if status is QueryStatus.AVAILABLE:
                break
        assert status is QueryStatus.AVAILABLE, f"Could not validate available username after {num_attempts} attempts with randomly generated usernames {attempted_usernames}."

    # Randomly generate usernames of high length and test for positive availability
    # Randomly generated usernames should be simple alnum for simplicity and high
    # compatibility. Several attempts may be made ~just in case~ a real username is
    # generated.
    @pytest.mark.parametrize('site,random_len', [
        ('GitHub', 39),
        ('Docker Hub', 30),
    ])
    def test_likely_negatives_via_status_code(self, sites_info, site, random_len):
        num_attempts: int = 3
        attempted_usernames: list[str] = []
        status: QueryStatus = QueryStatus.CLAIMED
        for i in range(num_attempts):
            acceptable_types = string.ascii_letters + string.digits
            random_handle = ''.join(random.choice(acceptable_types) for _ in range(random_len))
            attempted_usernames.append(random_handle)
            status = simple_query(sites_info=sites_info, site=site, username=random_handle)
            if status is QueryStatus.AVAILABLE:
                break
        assert status is QueryStatus.AVAILABLE, f"Could not validate available username after {num_attempts} attempts with randomly generated usernames {attempted_usernames}."


def test_username_illegal_regex(sites_info):
    site: str = 'BitBucket'
    invalid_handle: str = '*#$Y&*JRE'
    pattern = re.compile(sites_info[site]['regexCheck'])

    # Ensure that the username actually fails regex before testing sherlock
    assert pattern.match(invalid_handle) is None

    assert simple_query(sites_info=sites_info, site=site, username=invalid_handle) is QueryStatus.ILLEGAL

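Outside of pytest, the `simple_query` helper above reduces to one call into `sherlock()` with a single-site `site_data` dict. A minimal sketch, assuming network access and that the default manifest download succeeds (pass `data_file_path=...` to `SitesInformation` for a local copy):

```python
from sherlock_project.notify import QueryNotify
from sherlock_project.result import QueryStatus
from sherlock_project.sherlock import sherlock
from sherlock_project.sites import SitesInformation

# Build the full name -> information mapping, then keep just one site.
sites = {site.name: site.information for site in SitesInformation()}
site_data = {"GitHub": sites["GitHub"]}

results = sherlock(
    username="sherlock-project",
    site_data=site_data,
    query_notify=QueryNotify(),
)

# The per-site result carries a QueryResult whose .status is a QueryStatus member.
print(results["GitHub"]["status"].status is QueryStatus.CLAIMED)
```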
View File

@ -1,43 +0,0 @@
import pytest
from sherlock_project import sherlock
from sherlock_interactives import Interactives
from sherlock_interactives import InteractivesSubprocessError


def test_remove_nsfw(sites_obj):
    nsfw_target: str = 'Pornhub'
    assert nsfw_target in {site.name: site.information for site in sites_obj}
    sites_obj.remove_nsfw_sites()
    assert nsfw_target not in {site.name: site.information for site in sites_obj}


# Parametrized sites should *not* include Motherless, which is acting as the control
@pytest.mark.parametrize('nsfwsites', [
    ['Pornhub'],
    ['Pornhub', 'Xvideos'],
])
def test_nsfw_explicit_selection(sites_obj, nsfwsites):
    for site in nsfwsites:
        assert site in {site.name: site.information for site in sites_obj}
    sites_obj.remove_nsfw_sites(do_not_remove=nsfwsites)
    for site in nsfwsites:
        assert site in {site.name: site.information for site in sites_obj}
    assert 'Motherless' not in {site.name: site.information for site in sites_obj}


def test_wildcard_username_expansion():
    assert sherlock.check_for_parameter('test{?}test') is True
    assert sherlock.check_for_parameter('test{.}test') is False
    assert sherlock.check_for_parameter('test{}test') is False
    assert sherlock.check_for_parameter('testtest') is False
    assert sherlock.check_for_parameter('test{?test') is False
    assert sherlock.check_for_parameter('test?}test') is False

    assert sherlock.multiple_usernames('test{?}test') == ["test_test", "test-test", "test.test"]


@pytest.mark.parametrize('cliargs', [
    '',
    '--site urghrtuight --egiotr',
    '--',
])
def test_no_usernames_provided(cliargs):
    with pytest.raises(InteractivesSubprocessError, match=r"error: the following arguments are required: USERNAMES"):
        Interactives.run_cli(cliargs)

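For reference, a brief sketch of the two behaviours exercised above, run outside of pytest (the local manifest path is an assumption; adjust it to wherever `data.json` lives in your checkout):

```python
from sherlock_project import sherlock
from sherlock_project.sites import SitesInformation

# NSFW filtering: remove adult sites, optionally keeping an explicit allow-list.
sites = SitesInformation(data_file_path="sherlock_project/resources/data.json")
sites.remove_nsfw_sites(do_not_remove=["Pornhub"])

# Wildcard expansion: "{?}" in a username expands to _, - and . variants.
print(sherlock.check_for_parameter("user{?}name"))  # True
print(sherlock.multiple_usernames("user{?}name"))   # ['user_name', 'user-name', 'user.name']
```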
View File

@ -1,100 +0,0 @@
import pytest
import re
import rstr

from sherlock_project.sherlock import sherlock
from sherlock_project.notify import QueryNotify
from sherlock_project.result import QueryResult, QueryStatus

FALSE_POSITIVE_ATTEMPTS: int = 2  # Since the usernames are randomly generated, it's POSSIBLE that a real username can be hit
FALSE_POSITIVE_QUANTIFIER_UPPER_BOUND: int = 15  # If a pattern uses quantifiers such as `+` `*` or `{n,}`, limit the upper bound (0 to disable)
FALSE_POSITIVE_DEFAULT_PATTERN: str = r'^[a-zA-Z0-9]{7,20}$'  # Used in absence of a regexCheck entry


def set_pattern_upper_bound(pattern: str, upper_bound: int = FALSE_POSITIVE_QUANTIFIER_UPPER_BOUND) -> str:
    """Set upper bound for regex patterns that use quantifiers such as `+` `*` or `{n,}`."""
    def replace_upper_bound(match: re.Match) -> str:  # type: ignore
        lower_bound: int = int(match.group(1)) if match.group(1) else 0  # type: ignore
        nonlocal upper_bound
        upper_bound = upper_bound if lower_bound < upper_bound else lower_bound  # type: ignore # noqa: F823
        return f'{{{lower_bound},{upper_bound}}}'

    pattern = re.sub(r'(?<!\\)\{(\d+),\}', replace_upper_bound, pattern)  # {n,}  # type: ignore
    pattern = re.sub(r'(?<!\\)\+', f'{{1,{upper_bound}}}', pattern)  # +
    pattern = re.sub(r'(?<!\\)\*', f'{{0,{upper_bound}}}', pattern)  # *
    return pattern


def false_positive_check(sites_info: dict[str, dict[str, str]], site: str, pattern: str) -> QueryStatus:
    """Check if a site is likely to produce false positives."""
    status: QueryStatus = QueryStatus.UNKNOWN
    for _ in range(FALSE_POSITIVE_ATTEMPTS):
        query_notify: QueryNotify = QueryNotify()
        username: str = rstr.xeger(pattern)
        result: QueryResult | str = sherlock(
            username=username,
            site_data=sites_info,
            query_notify=query_notify,
        )[site]['status']
        if not hasattr(result, 'status'):
            raise TypeError(f"Result for site {site} does not have 'status' attribute. Actual result: {result}")
        if type(result.status) is not QueryStatus:  # type: ignore
            raise TypeError(f"Result status for site {site} is not of type QueryStatus. Actual type: {type(result.status)}")  # type: ignore
        status = result.status  # type: ignore
        if status in (QueryStatus.AVAILABLE, QueryStatus.WAF):
            return status
    return status


def false_negative_check(sites_info: dict[str, dict[str, str]], site: str) -> QueryStatus:
    """Check if a site is likely to produce false negatives."""
    status: QueryStatus = QueryStatus.UNKNOWN
    query_notify: QueryNotify = QueryNotify()
    result: QueryResult | str = sherlock(
        username=sites_info[site]['username_claimed'],
        site_data=sites_info,
        query_notify=query_notify,
    )[site]['status']
    if not hasattr(result, 'status'):
        raise TypeError(f"Result for site {site} does not have 'status' attribute. Actual result: {result}")
    if type(result.status) is not QueryStatus:  # type: ignore
        raise TypeError(f"Result status for site {site} is not of type QueryStatus. Actual type: {type(result.status)}")  # type: ignore
    status = result.status  # type: ignore
    return status


@pytest.mark.validate_targets
@pytest.mark.online
class Test_All_Targets:
    @pytest.mark.validate_targets_fp
    def test_false_pos(self, chunked_sites: dict[str, dict[str, str]]):
        """Iterate through all sites in the manifest to discover possible false-positive inducing targets."""
        pattern: str
        for site in chunked_sites:
            try:
                pattern = chunked_sites[site]['regexCheck']
            except KeyError:
                pattern = FALSE_POSITIVE_DEFAULT_PATTERN
            if FALSE_POSITIVE_QUANTIFIER_UPPER_BOUND > 0:
                pattern = set_pattern_upper_bound(pattern)
            result: QueryStatus = false_positive_check(chunked_sites, site, pattern)
            assert result is QueryStatus.AVAILABLE, f"{site} produced false positive with pattern {pattern}, result was {result}"

    @pytest.mark.validate_targets_fn
    def test_false_neg(self, chunked_sites: dict[str, dict[str, str]]):
        """Iterate through all sites in the manifest to discover possible false-negative inducing targets."""
        for site in chunked_sites:
            result: QueryStatus = false_negative_check(chunked_sites, site)
            assert result is QueryStatus.CLAIMED, f"{site} produced false negative, result was {result}"

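To make the quantifier capping concrete, the sketch below applies the same `+`/`*` substitutions the helper uses to an arbitrary sample pattern and then draws a random candidate from it with `rstr.xeger` (the sample pattern and bound are illustrative only):

```python
import re

import rstr

UPPER_BOUND = 15  # mirrors FALSE_POSITIVE_QUANTIFIER_UPPER_BOUND above

# The unbounded quantifiers + and * are rewritten into bounded repetitions.
pattern = r'^[a-zA-Z0-9_]+$'
pattern = re.sub(r'(?<!\\)\+', f'{{1,{UPPER_BOUND}}}', pattern)  # +  -> {1,15}
pattern = re.sub(r'(?<!\\)\*', f'{{0,{UPPER_BOUND}}}', pattern)  # *  -> {0,15}
print(pattern)  # ^[a-zA-Z0-9_]{1,15}$

# rstr.xeger then produces a bounded random string matching the capped pattern,
# which is what false_positive_check uses as a throwaway username.
print(rstr.xeger(pattern))
```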
View File

@ -1,16 +0,0 @@
import os

from sherlock_interactives import Interactives
import sherlock_project


def test_versioning() -> None:
    # Ensure __version__ matches version presented to the user
    assert sherlock_project.__version__ in Interactives.run_cli("--version")

    # Ensure __init__ is single source of truth for __version__ in package
    # Temporarily allows sherlock.py so as to not trigger early upgrades
    found: list = Interactives.walk_sherlock_for_files_with(r'__version__ *= *')
    expected: list = [
        # Normalization is REQUIRED for Windows ( / vs \ )
        os.path.normpath("sherlock_project/__init__.py"),
    ]
    # Sorting is REQUIRED for Mac
    assert sorted(found) == sorted(expected)

42
tox.ini
View File

@ -1,42 +0,0 @@
[tox]
requires =
    tox >= 3
envlist =
    lint
    py313
    py312
    py311
    py310

[testenv]
description = Attempt to build and install the package
deps =
    coverage
    jsonschema
    pytest
    rstr
allowlist_externals = coverage
commands =
    coverage run --source=sherlock_project --module pytest -v
    coverage report --show-missing

[testenv:offline]
deps =
    jsonschema
    pytest
commands =
    pytest -v -m "not online"

[testenv:lint]
description = Lint with Ruff
deps =
    ruff
commands =
    ruff check

[gh-actions]
python =
    3.13: py313
    3.12: py312
    3.11: py311
    3.10: py310
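As a usage note, the `offline` environment above skips every test marked `online`, which is handy when working without network access. A small sketch of driving it from Python (it assumes `tox` is installed and on PATH; running `tox -e offline` in a shell is equivalent):

```python
import subprocess

# Run only the offline test environment defined in tox.ini.
subprocess.run(["tox", "-e", "offline"], check=True)
```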