07070100000000000081A400000000000000000000000166E1887C00000137000000000000000000000000000000000000003300000000eventlet-0.37.0+git.1726056572.8637820/.coveragerc [run]
branch = True
source = eventlet
# concurrency=eventlet gives 0% report on CPython and start error on pypy
#concurrency = eventlet
omit =
eventlet/support/dns/*
tests/*
[report]
exclude_lines =
pragma: no cover
raise NotImplementedError
if __name__ == .__main__.:
ignore_errors = True
07070100000001000041ED00000000000000000000000266E1887C00000000000000000000000000000000000000000000002F00000000eventlet-0.37.0+git.1726056572.8637820/.github 07070100000002000041ED00000000000000000000000266E1887C00000000000000000000000000000000000000000000003900000000eventlet-0.37.0+git.1726056572.8637820/.github/workflows 07070100000003000081A400000000000000000000000166E1887C000001BF000000000000000000000000000000000000004300000000eventlet-0.37.0+git.1726056572.8637820/.github/workflows/docs.yaml name: Build Docs
on: [push, pull_request]
permissions:
id-token: write
jobs:
build-docs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v3
with:
python-version: '3.x'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install tox tox-gh-actions
- name: Build docs
run: tox -e docs
07070100000004000081A400000000000000000000000166E1887C0000045E000000000000000000000000000000000000004600000000eventlet-0.37.0+git.1726056572.8637820/.github/workflows/publish.yaml # This workflow will upload a Python Package using Twine when a release is created
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
name: Build Package & Optional Deploy
on: [push, pull_request]
permissions:
id-token: write
jobs:
build-package-and-deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v3
with:
python-version: '3.x'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install build
- name: Build package
run: python -m build
- name: Publish package
# deploy only when a new tag is pushed to github
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags')
uses: pypa/gh-action-pypi-publish@release/v1
07070100000005000081A400000000000000000000000166E1887C000004A9000000000000000000000000000000000000004400000000eventlet-0.37.0+git.1726056572.8637820/.github/workflows/style.yaml name: style
on:
push:
pull_request:
jobs:
style:
runs-on: ubuntu-latest
# https://github.community/t/duplicate-checks-on-push-and-pull-request-simultaneous-event/18012/5
if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != 'eventlet/eventlet'
timeout-minutes: 5
steps:
- uses: actions/checkout@v3
- name: cache pip
uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('.github/workflows/style.yaml') }}
restore-keys: |
${{ runner.os }}-pip-
${{ runner.os }}-
- name: cache tox
uses: actions/cache@v3
with:
path: .tox
key: ${{ runner.os }}-tox-style-${{ hashFiles('tox.ini') }}
restore-keys: |
${{ runner.os }}-tox-style-
${{ runner.os }}-tox-
${{ runner.os }}-
- name: setup python
uses: actions/setup-python@v3
with:
python-version: 3.x
- name: install tox
run: pip install tox
- name: run tests
run: |
tox --verbose -e pep8
tox --verbose -e lint
07070100000006000081A400000000000000000000000166E1887C00001548000000000000000000000000000000000000004300000000eventlet-0.37.0+git.1726056572.8637820/.github/workflows/test.yaml name: test
on:
push:
pull_request:
schedule:
- cron: "43 7 */14 * *" # every two weeks, time chosen by RNG
jobs:
tox:
name: "tox ${{ matrix.toxenv }}"
continue-on-error: ${{ matrix.ignore-error }}
runs-on: ${{ matrix.os }}
# https://github.community/t/duplicate-checks-on-push-and-pull-request-simultaneous-event/18012/5
if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != 'eventlet/eventlet'
timeout-minutes: 10
services:
mysql:
image: mysql:5.7
env: { MYSQL_ALLOW_EMPTY_PASSWORD: yes }
ports: ["3306:3306"]
options: --health-cmd="mysqladmin ping" --health-timeout=5s --health-retries=5 --health-interval=5s
postgres:
image: postgres:13
env: { POSTGRES_PASSWORD: "secret" }
ports: ["5432:5432"]
options: --health-cmd pg_isready --health-timeout 5s --health-retries 5 --health-interval 5s
env:
EVENTLET_DB_TEST_AUTH: '{"psycopg2": {"host": "127.0.0.1", "port": 5432, "user": "postgres", "password": "secret"}, "MySQLdb": {"host": "127.0.0.1", "port": 3306, "passwd": "", "user": "root"}}'
strategy:
fail-fast: false
matrix:
include:
- { py: 3.7, toxenv: py37-epolls, ignore-error: false, os: ubuntu-latest }
- { py: 3.8, toxenv: py38-epolls, ignore-error: false, os: ubuntu-latest }
- { py: 3.8, toxenv: py38-openssl, ignore-error: false, os: ubuntu-latest }
- { py: 3.8, toxenv: py38-poll, ignore-error: false, os: ubuntu-latest }
- { py: 3.8, toxenv: py38-selects, ignore-error: false, os: ubuntu-latest }
- { py: 3.9, toxenv: py39-epolls, ignore-error: false, os: ubuntu-latest }
- { py: 3.9, toxenv: py39-poll, ignore-error: false, os: ubuntu-latest }
- { py: 3.9, toxenv: py39-selects, ignore-error: false, os: ubuntu-latest }
- { py: 3.9, toxenv: py39-dnspython1, ignore-error: false, os: ubuntu-latest }
- { py: "3.10", toxenv: py310-epolls, ignore-error: false, os: ubuntu-latest }
- { py: "3.10", toxenv: py310-poll, ignore-error: false, os: ubuntu-latest }
- { py: "3.10", toxenv: py310-selects, ignore-error: false, os: ubuntu-latest }
- { py: "3.10", toxenv: ipv6, ignore-error: false, os: ubuntu-latest }
- { py: "3.11", toxenv: py311-epolls, ignore-error: false, os: ubuntu-latest }
- { py: "3.12", toxenv: py312-epolls, ignore-error: false, os: ubuntu-latest }
- { py: "3.7", toxenv: py37-asyncio, ignore-error: false, os: ubuntu-latest }
- { py: "3.8", toxenv: py38-asyncio, ignore-error: false, os: ubuntu-latest }
- { py: "3.9", toxenv: py39-asyncio, ignore-error: false, os: ubuntu-latest }
- { py: "3.10", toxenv: py310-asyncio, ignore-error: false, os: ubuntu-latest }
- { py: "3.11", toxenv: py311-asyncio, ignore-error: false, os: ubuntu-latest }
- { py: "3.12", toxenv: py312-asyncio, ignore-error: false, os: ubuntu-latest }
- { py: pypy3.9, toxenv: pypy3-epolls, ignore-error: true, os: ubuntu-20.04 }
steps:
- name: install system packages
run: sudo apt install -y --no-install-recommends ccache libffi-dev default-libmysqlclient-dev libpq-dev libssl-dev libzmq3-dev
- uses: actions/checkout@v3
- name: cache pip
uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ matrix.toxenv }}-${{ hashFiles('.github/workflows/test.yaml', 'setup.py') }}
restore-keys: |
${{ runner.os }}-pip-
${{ runner.os }}-
- name: cache tox
uses: actions/cache@v3
with:
path: .tox
key: ${{ runner.os }}-tox-${{ matrix.toxenv }}-${{ hashFiles('tox.ini') }}
restore-keys: |
${{ runner.os }}-tox-
${{ runner.os }}-
- name: setup python ${{ matrix.py }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.py }}
- name: install codecov, tox
run: pip install codecov tox
- run: env
- name: run tests
run: tox --verbose --verbose -e ${{ matrix.toxenv }}
- name: codecov
run: codecov --flags=$(echo ${{ matrix.toxenv }} |tr -d -- '-.')
tox-macos:
name: "macOS tox ${{ matrix.toxenv }}"
continue-on-error: ${{ matrix.ignore-error }}
runs-on: ${{ matrix.os }}
# https://github.community/t/duplicate-checks-on-push-and-pull-request-simultaneous-event/18012/5
if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != 'eventlet/eventlet'
timeout-minutes: 10
strategy:
fail-fast: false
matrix:
include:
- { py: "3.12", toxenv: py312-asyncio, ignore-error: false, os: macos-latest }
# This isn't working very well at the moment, but that might just be
# tox config? In any case main focus is on asyncio so someone can
# revisit separately.
#- { py: "3.12", toxenv: py312-kqueue, ignore-error: false, os: macos-latest }
steps:
- uses: actions/checkout@v3
- name: install codecov, tox
run: pip install codecov tox
- run: env
- name: run tests
run: tox --verbose --verbose -e ${{ matrix.toxenv }}
- name: codecov
run: codecov --flags=$(echo ${{ matrix.toxenv }} |tr -d -- '-.')
07070100000007000081A400000000000000000000000166E1887C000000D3000000000000000000000000000000000000003200000000eventlet-0.37.0+git.1726056572.8637820/.gitignore # please keep this file sorted
*.egg*
*.pyc
.venv
build/
doc/source/changelog.rst
.coverage
coverage.xml
dist/
doc/changelog.rst
venv*
website-build/
.ruff_cache/
# auto-generated by hatch
eventlet/_version.py
07070100000008000081A400000000000000000000000166E1887C00000359000000000000000000000000000000000000003900000000eventlet-0.37.0+git.1726056572.8637820/.readthedocs.yaml # .readthedocs.yaml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
# Required
version: 2
# Set the OS, Python version and other tools you might need
build:
os: ubuntu-22.04
tools:
python: "3.12"
# You can also specify other tool versions:
# nodejs: "19"
# rust: "1.64"
# golang: "1.19"
# Build documentation in the "docs/" directory with Sphinx
sphinx:
configuration: doc/source/conf.py
# Optionally build your docs in additional formats such as PDF and ePub
# formats:
# - pdf
# - epub
# Optional but recommended, declare the Python requirements required
# to build your documentation
# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
python:
install:
- requirements: doc/requirements.txt
- method: pip
path: .
07070100000009000081A400000000000000000000000166E1887C0000183B000000000000000000000000000000000000002F00000000eventlet-0.37.0+git.1726056572.8637820/AUTHORS Maintainer (i.e., Who To Hassle If You Find Bugs)
-------------------------------------------------
The current maintainer(s) are volunteers with unrelated jobs.
We can only pay sporadic attention to responding to your issue and pull request submissions.
Your patience is greatly appreciated!
Active maintainers
~~~~~~~~~~~~~~~~~~
* Itamar Turner-Trauring https://github.com/itamarst
* Tim Burke https://github.com/tipabu
* Hervé Beraud https://github.com/4383
Less active maintainers
~~~~~~~~~~~~~~~~~~~~~~~
* Sergey Shepelev https://github.com/temoto
* Jakub Stasiak https://github.com/jstasiak
* Nat Goodspeed https://github.com/nat-goodspeed
Original Authors
----------------
* Bob Ippolito
* Donovan Preston
Contributors
------------
* AG Projects
* Chris AtLee
* R\. Tyler Ballance
* Denis Bilenko
* Mike Barton
* Patrick Carlisle
* Ben Ford
* Andrew Godwin
* Brantley Harris
* Gregory Holt
* Joe Malicki
* Chet Murthy
* Eugene Oden
* radix
* Scott Robinson
* Tavis Rudd
* Sergey Shepelev
* Chuck Thier
* Nick V
* Daniele Varrazzo
* Ryan Williams
* Geoff Salmon
* Edward George
* Floris Bruynooghe
* Paul Oppenheim
* Jakub Stasiak
* Aldona Majorek
* Victor Sergeyev
* David Szotten
* Victor Stinner
* Samuel Merritt
* Eric Urban
* Miguel Grinberg
* Tuomo Kriikkula
Linden Lab Contributors
-----------------------
* John Beisley
* Tess Chu
* Nat Goodspeed
* Dave Kaprielian
* Kartic Krishnamurthy
* Bryan O'Sullivan
* Kent Quirk
* Ryan Williams
Thanks To
---------
* AdamKG, giving the hint that invalid argument errors were introduced post-0.9.0
* Luke Tucker, bug report regarding wsgi + webob
* Taso Du Val, reproing an exception squelching bug, saving children's lives ;-)
* Luci Stanescu, for reporting twisted hub bug
* Marcus Cavanaugh, for test case code that has been incredibly useful in tracking down bugs
* Brian Brunswick, for many helpful questions and suggestions on the mailing list
* Cesar Alaniz, for uncovering bugs of great import
* the grugq, for contributing patches, suggestions, and use cases
* Ralf Schmitt, for wsgi/webob incompatibility bug report and suggested fix
* Benoit Chesneau, bug report on green.os and patch to fix it
* Slant, better iterator implementation in tpool
* Ambroff, nice pygtk hub example
* Michael Carter, websocket patch to improve location handling
* Marcin Bachry, nice repro of a bug and good diagnosis leading to the fix
* David Ziegler, reporting issue #53
* Favo Yang, twisted hub patch
* Schmir, patch that fixes readline method with chunked encoding in wsgi.py, advice on patcher
* Slide, for open-sourcing gogreen
* Holger Krekel, websocket example small fix
* mikepk, debugging MySQLdb/tpool issues
* Malcolm Cleaton, patch for Event exception handling
* Alexey Borzenkov, for finding and fixing issues with Windows error detection (#66, #69), reducing dependencies in zeromq hub (#71)
* Anonymous, finding and fixing error in websocket chat example (#70)
* Edward George, finding and fixing an issue in the [e]poll hubs (#74), and in convenience (#86)
* Ruijun Luo, figuring out incorrect openssl import for wrap_ssl (#73)
* rfk, patch to get green zmq to respect noblock flag.
* Soren Hansen, finding and fixing issue in subprocess (#77)
* Stefano Rivera, making tests pass in absence of postgres (#78)
* Joshua Kwan, fixing busy-wait in eventlet.green.ssl.
* Nick Vatamaniuc, Windows SO_REUSEADDR patch (#83)
* Clay Gerrard, wsgi handle socket closed by client (#95)
* Eric Windisch, zmq getsockopt(EVENTS) wake correct threads (pull request 22)
* Raymond Lu, fixing busy-wait in eventlet.green.ssl.socket.sendall()
* Thomas Grainger, webcrawler example small fix, "requests" library import bug report, Travis integration
* Peter Portante, save syscalls in socket.dup(), environ[REMOTE_PORT] in wsgi
* Peter Skirko, fixing socket.settimeout(0) bug
* Derk Tegeler, Pre-cache proxied GreenSocket methods (Bitbucket #136)
* David Malcolm, optional "timeout" argument to the subprocess module (Bitbucket #89)
* David Goetz, wsgi: Allow minimum_chunk_size to be overriden on a per request basis
* Dmitry Orlov, websocket: accept Upgrade: websocket (lowercase)
* Zhang Hua, profile: accumulate results between runs (Bitbucket #162)
* Astrum Kuo, python3 compatibility fixes; greenthread.unlink() method
* Davanum Srinivas, Python3 compatibility fixes
* Dmitriy Kruglyak, PyPy 2.3 compatibility fix
* Jan Grant, Michael Kerrin, second simultaneous read (GH-94)
* Simon Jagoe, Python3 octal literal fix
* Tushar Gohad, wsgi: Support optional headers w/ "100 Continue" responses
* raylu, fixing operator precedence bug in eventlet.wsgi
* Christoph Gysin, PEP 8 conformance
* Andrey Gubarev
* Corey Wright
* Deva
* Johannes Erdfelt
* Kevin
* QthCN
* Steven Hardy
* Stuart McLaren
* Tomaz Muraus
* ChangBo Guo(gcb), fixing typos in the documentation (GH-194)
* Marc Abramowitz, fixing the README so it renders correctly on PyPI (GH-183)
* Shaun Stanworth, equal chance to acquire semaphore from different greenthreads (GH-136)
* Lior Neudorfer, Make sure SSL retries are done using the exact same data buffer
* Sean Dague, wsgi: Provide python logging compatibility
* Tim Simmons, Use _socket_nodns and select in dnspython support
* Antonio Cuni, fix fd double close on PyPy
* Seyeong Kim
* Ihar Hrachyshka
* Janusz Harkot
* Fukuchi Daisuke
* Ramakrishnan G
* ashutosh-mishra
* Azhar Hussain
* Josh VanderLinden
* Levente Polyak
* Phus Lu
* Collin Stocks, fixing eventlet.green.urllib2.urlopen() so it accepts cafile, capath, or cadefault arguments
* Alexis Lee
* Steven Erenst
* Piët Delport
* Alex Villacís Lasso
* Yashwardhan Singh
* Tim Burke
* Ondřej Nový
* Jarrod Johnson
* Whitney Young
* Matthew D. Pagel
* Matt Yule-Bennett
* Artur Stawiarski
* Tal Wrii
* Roman Podoliaka
* Gevorg Davoian
* Ondřej Kobližek
* Yuichi Bando
* Feng
* Aayush Kasurde
* Linbing
* Geoffrey Thomas
* Costas Christofi, adding permessage-deflate weboscket extension support
* Peter Kovary, adding permessage-deflate weboscket extension support
* Konstantin Enchant
* James Page
* Stefan Nica
* Haikel Guemar
* Miguel Grinberg
* Chris Kerr
* Anthony Sottile
* Quan Tian
* orishoshan
* Matt Bennett
* Ralf Haferkamp
* Jake Tesler
* Aayush Kasurde
0707010000000A000081A400000000000000000000000166E1887C00000FBE000000000000000000000000000000000000003700000000eventlet-0.37.0+git.1726056572.8637820/CONTRIBUTING.md # Contributing to Eventlet
Please take a moment to review this document in order to make the contribution
process easy and effective for everyone involved.
Following these guidelines helps to communicate that you respect the time
of the developers managing and developing this open source project. In return,
they should reciprocate that respect in addressing your issue or assessing
patches and features.
## Using the issue tracker
The [issue tracker](https://github.com/eventlet/eventlet/issues) is
the preferred channel for [Discussion](#discussion), [bug reports](#bugs), [features requests](#features)
and [submitting pull requests](#pull-requests).
## Discussion
- There's an IRC channel dedicated to Eventlet: `#eventlet` on freenode. It's a pretty chill place to hang out!
- We have Eventlet Google+ Community. Join us, +1, share your ideas, report bugs, find new friends or even new job!
## Bug reports
A bug is a _demonstrable problem_ that is caused by the code in the repository.
Good bug reports are extremely helpful - thank you!
You may report bugs via GitHub https://github.com/eventlet/eventlet/issues/new
Guidelines for bug reports:
1. **Before filing issue try to search for solution on the web** — There are lots of good resources for Eventlet and its related information which can be helpful in resolving issues and get things done.
2. **Use the GitHub issue search** — check if the issue has already been
reported.
3. **Check if the issue has been fixed** — try to reproduce it using the
latest `master` or development branch in the repository.
4. Please be sure to report bugs [as effectively as possible](http://www.chiark.greenend.org.uk/~sgtatham/bugs.html),
to ensure that we understand and act on them quickly.
A good bug report shouldn't leave others needing to chase you up for more information.
Please try to be as detailed as possible in your report.
- What is your environment?
- Which is eventlet version your using?
- `uname -a`
- `python -V`
- `pip freeze`
- What steps will reproduce the issue?
- What would you expect to be the outcome?
All these details will help people to fix any potential bugs.
Example of good bug report::
> Short description in title of issue like `HTTPS/SSL failure when using requests library on Python 3.4`
>
> `uname -a` output
>
> `python -V` output
>
> Steps to reproduce issue
## Feature requests
Feature requests are welcome. But take a moment to find out whether your idea
fits with the scope and aims of the project. It's up to *you* to make a strong
case to convince the project's developers of the merits of this feature. Please
provide as much detail and context as possible.
## Pull requests
Good pull requests - patches, improvements, new features - are a fantastic help.
They should remain focused in scope and avoid containing unrelated commits.
**Please ask first** before embarking on any significant pull request (e.g.
implementing features, re-factoring code), otherwise you risk spending a lot of
time working on something that the project's developers might not want to merge
into the project.
Please adhere to the coding conventions used throughout a project (indentation,
accurate comments, etc.) and any other requirements such as
- Test is required
- One commit is strongly preferred, except for very big changes
- Commit message should follow the following formula:
>subsystem: description of why the change is useful
>
>optional details
>
>links to related issues or websites
The why part is very important. Diff already says what you have done. But nobody knows why.
Feel free to append yourself into AUTHORS file, sections Thanks To or Contributors.
If you don't like these rules, raw patches are more than welcome!
**IMPORTANT**: By submitting a patch, you agree to allow the project owner to
license your work under the same license as that used by the project.
0707010000000B000081A400000000000000000000000166E1887C000004E6000000000000000000000000000000000000002F00000000eventlet-0.37.0+git.1726056572.8637820/LICENSE Unless otherwise noted, the files in Eventlet are under the following MIT license:
Copyright (c) 2005-2006, Bob Ippolito
Copyright (c) 2007-2010, Linden Research, Inc.
Copyright (c) 2008-2010, Eventlet Contributors (see AUTHORS)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
0707010000000C000081A400000000000000000000000166E1887C000000DE000000000000000000000000000000000000003300000000eventlet-0.37.0+git.1726056572.8637820/MANIFEST.in recursive-include tests *.py *.crt *.key
recursive-include doc *.rst *.txt *.py Makefile *.png
recursive-include examples *.py *.html
include CONTRIBUTING.md MANIFEST.in NEWS AUTHORS LICENSE README.rst SECURITY.md tox.ini
0707010000000D000081A400000000000000000000000166E1887C0000B946000000000000000000000000000000000000002C00000000eventlet-0.37.0+git.1726056572.8637820/NEWS Unreleased
==========
0.37.0
======
* [fix] os.read/write waits until file descriptor is ready. https://github.com/eventlet/eventlet/pull/975
* [fix] Upgrade RLocks as last thing we do https://github.com/eventlet/eventlet/pull/970
* [security] drop header keys with underscores https://github.com/eventlet/eventlet/pull/959
* [doc] Various doc updates (Migration Guide, repair links, warns...)
0.36.1
======
* [fix] eventlet.websocket is not always used from eventlet.wsgi, so do not assume eventlet.set_idle exists https://github.com/eventlet/eventlet/pull/949
0.36.0
======
* [fix] Make sure asyncio hub doesn't use greendns for asyncio DNS APIs https://github.com/eventlet/eventlet/pull/938
* [fix] Make asyncio.to_thread work with the same semantics as normal asyncio https://github.com/eventlet/eventlet/pull/930
* [fix] Refactor congruence checks based on assert at runtime https://github.com/eventlet/eventlet/pull/932
* [tests] Run tests on macOS in CI, and some fixes to get it in reasonable state (#list https://github.com/eventlet/eventlet/pull/934
* [fix] Fix wsgi.server shutdown for in-flight requests https://github.com/eventlet/eventlet/pull/912
* [feature] Add debug convenience helpers - asyncio, threads https://github.com/eventlet/eventlet/pull/925
* [fix] Handle errors better. https://github.com/eventlet/eventlet/pull/923
0.35.2
======
* [fix] Fix tool.setuptools/packages list https://github.com/eventlet/eventlet/pull/921
* [security] Dnspython 2.6.1 - Address DoS via the Tudoor mechanism (CVE-2023-29483) https://github.com/eventlet/eventlet/pull/916
* [doc] add asyncio into the doc hub page https://github.com/eventlet/eventlet/pull/918
* [clean] clean obsolete python 2 code from the ssl module https://github.com/eventlet/eventlet/pull/915
* [fix] Add get_server_info to db_pool.py https://github.com/eventlet/eventlet/pull/324
* [fix] wsgi: Handle Timeouts from applications https://github.com/eventlet/eventlet/pull/911
* [fix] shrinks window before connecting https://github.com/eventlet/eventlet/pull/905
0.35.1
======
* [fix] Do not allow failed patching to stop execution https://github.com/eventlet/eventlet/pull/907
0.35.0
======
* [doc] Basic documentation for asyncio migration https://github.com/eventlet/eventlet/pull/892
* [tests] add minimal linting https://github.com/eventlet/eventlet/pull/894
* [doc] officially host docs on readthedocs https://github.com/eventlet/eventlet/pull/899
* [fix] fix truncate size nullable https://github.com/eventlet/eventlet/pull/789
* [fix] Handle transport endpoint shutdown in conditions https://github.com/eventlet/eventlet/pull/884
* [fix] Rework reject_bad_requests option https://github.com/eventlet/eventlet/pull/890
* [fix] Fix NameError introduced by #826 https://github.com/eventlet/eventlet/pull/890
* [feature] Support awaiting GreenThread in an `async def` context https://github.com/eventlet/eventlet/pull/889
* [infra] Extend test cert to 2049 https://github.com/eventlet/eventlet/pull/643
* [feature] Asyncio hub support for Python 3.7 to 3.9 https://github.com/eventlet/eventlet/pull/886
* [infra] Modernize doc generation https://github.com/eventlet/eventlet/pull/880
* [fix] Fix bad exceptions handlings https://github.com/eventlet/eventlet/pull/883
* [feature] Support using asyncio coroutines from inside greenlets https://github.com/eventlet/eventlet/pull/877
* [removal] Remove deprecated CGIHTTPServer and SimpleHTTPServer https://github.com/eventlet/eventlet/pull/881
* [governance] Update maintenance goals https://github.com/eventlet/eventlet/pull/850
* [feature] Add an asyncio hub for eventlet https://github.com/eventlet/eventlet/pull/870
0.34.3
======
* Fix security issue in the wsgi module related to RFC 9112 https://github.com/eventlet/eventlet/pull/826
* Fix segfault, a new approach for greening existing locks https://github.com/eventlet/eventlet/pull/866
* greendns: fix getaddrinfo parameter name https://github.com/eventlet/eventlet/pull/809
* Fix deprecation warning on ssl.PROTOCOL_TLS https://github.com/eventlet/eventlet/pull/872
* Pytests, fix error at teardown of TestGreenSocket.test_full_duplex https://github.com/eventlet/eventlet/pull/871
* Skip test which uses Py cgi module https://github.com/eventlet/eventlet/pull/865
* Drop old code based on python < 3.7
0.34.2
======
* Allowing inheritance of GreenSSLSocket without overriding the __new_ method https://github.com/eventlet/eventlet/pull/796
* [bug] Fix broken API related to `__version__` removal https://github.com/eventlet/eventlet/pull/859
* [doc] Fix pypi broken link https://github.com/eventlet/eventlet/pull/857
0.34.1
======
* [bug] Fix memory leak in greendns https://github.com/eventlet/eventlet/issues/810
* [infra] Fix OIDC authentication failure https://github.com/eventlet/eventlet/pull/855
* [bug] Ignore asyncore and asynchat for Python 3.12+ https://github.com/eventlet/eventlet/issues/804
0.34.0 (Not released on Pypi)
=============================
* Dropped support for Python 3.6 and earlier.
* Fix Python 3.13 compat by adding missing attibute '_is_main_interpreter' https://github.com/eventlet/eventlet/pull/847
* Add support of Python 3.12 https://github.com/eventlet/eventlet/pull/817
* Drop unmaintained and unused stdlib tests https://github.com/eventlet/eventlet/pull/820
* Fix tests and CI for Python 3.7 and higher https://github.com/eventlet/eventlet/pull/831 and https://github.com/eventlet/eventlet/pull/832
* Stop claiming to create universal wheels https://github.com/eventlet/eventlet/pull/841
* Fix green logging locks for Python versions <= 3.10 https://github.com/eventlet/eventlet/pull/754
0.33.3
======
* dnspython 2.3.0 raised AttributeError: module 'dns.rdtypes' has no attribute 'ANY' https://github.com/eventlet/eventlet/issues/781
0.33.2
======
* greenio: GreenPipe/fdopen() with 'a' in mode raised io.UnsupportedOperation: File or stream is not writable https://github.com/eventlet/eventlet/pull/758
0.33.1
======
* Prevent deadlock on logging._lock https://github.com/eventlet/eventlet/issues/742
0.33.0
======
* green.thread: unlocked Lock().release() should raise exception, returned True https://github.com/eventlet/eventlet/issues/697
* wsgi: Don't break HTTP framing during 100-continue handling https://github.com/eventlet/eventlet/pull/578
* Python 3.10 partial support https://github.com/eventlet/eventlet/pull/715
* greendns: Create a DNS resolver lazily rather than on import https://github.com/eventlet/eventlet/issues/462
* ssl: GreenSSLContext minimum_version and maximum_version setters https://github.com/eventlet/eventlet/issues/726
0.32.0
======
* greendns: compatibility with dnspython v2 https://github.com/eventlet/eventlet/pull/722
* green.ssl: wrap_socket now accepts argument `ciphers` https://github.com/eventlet/eventlet/pull/718
* websocket: control frames are now always uncompressed per RFC 7692; Thanks to Onno Kortmann
0.31.1
======
* ssl: py3.6 using client certificates raised ValueError: check_hostname needs server_hostname argument https://github.com/eventlet/eventlet/pull/705
0.31.0
======
* IMPORTANT: websocket: Limit maximum uncompressed frame length to 8MiB https://github.com/eventlet/eventlet/security/advisories/GHSA-9p9m-jm8w-94p2
0.30.3
======
* wsgi: websocket ALREADY_HANDLED flag on corolocal
* green.ssl: Set suppress_ragged_eofs default based on SSLSocket defaults
* greenio: socket.connect_ex returned None instead of 0 on success
* Use _imp instead of deprecated imp
0.30.2
======
* greendns: patch ssl to fix RecursionError on SSLContext.options.__set__ https://github.com/eventlet/eventlet/issues/677
0.30.1
======
* patcher: built-in open() did not accept kwargs https://github.com/eventlet/eventlet/issues/683
0.30.0
======
* pyopenssl tsafe module was deprecated and removed in v20.0.0
* deprecate pyevent hub
* Deprecate CPython 2.7 and 3.4 support
* py39: Add _at_fork_reinit method to Semaphores
0.29.1
======
patcher: [py27] recursion error in pytest/python2.7 installing register_at_fork https://github.com/eventlet/eventlet/issues/660
patcher: monkey_patch(builtins=True) failed on py3 because `file` class is gone https://github.com/eventlet/eventlet/issues/541
don't crash on PyPy 7.0.0 https://github.com/eventlet/eventlet/pull/547
Only install monotonic on python2 https://github.com/eventlet/eventlet/pull/583
0.29.0
======
* ssl: context wrapped listener fails accept() https://github.com/eventlet/eventlet/issues/651
0.28.1
======
* Sorry, Eventlet was broken on Windows for versions 0.27-0.28
patcher: no os.register_at_fork on Windows (#654)
* Clean up TypeError in __del__
0.28.0
======
* Always remove the right listener from the hub https://github.com/eventlet/eventlet/pull/645
0.27.0
======
* patcher: Clean up threading book-keeping at fork when monkey-patched
* backdoor: handle disconnects better
0.26.1
======
* pin dnspython <2.0.0 https://github.com/eventlet/eventlet/issues/619
0.26.0
======
* Fix compatibility with SSLContext usage >= Python 3.7
* wsgi: Fix header capitalization on py3
* Fix #508: Py37 Deadlock ThreadPoolExecutor (#598)
* drop Python 3.4 support
* Fix misc SyntaxWarning's under Python 3.8
* Remove unnecessary assignment in _recv_loop (#601)
0.25.2
======
* green.ssl: redundant set_nonblocking() caused SSLWantReadError
0.25.1
======
* wsgi (tests): Stop using deprecated cgi.parse_qs() to support Python 3.8; Thanks to Miro Hrončok
* os: Add workaround to `open` for pathlib on py 3.7; Thanks to David Szotten
0.25.0
======
* wsgi: Only send 100 Continue response if no response has been sent yet; Thanks to Tim Burke
* wsgi: Return 400 on negative Content-Length request headers; Thanks to Tim Burke
* Make a GreenPile with no spawn()s an empty sequence; Thanks to nat-goodspeed
* wsgi: fix Input.readlines when dealing with chunked input; Thanks to Tim Burke
* wsgi: fix Input.readline on Python 3; Thanks to Tim Burke
* wsgi: Stop replacing invalid UTF-8 on py3; Thanks to Tim Burke
* ssl: Fix compatibility with Python 3.7 ssl.SSLSocket; Thanks to Junyi
* reimport submodule as well in patcher.inject; Thanks to Junyi
* use Python 2 compatible syntax for keyword-only args; Thanks to nat-goodspeed
* wsgi: Catch and swallow IOErrors during discard(); Thanks to Tim Burke
* Fix for Python 3.7; Thanks to Marcel Plch
* Fix race that could cause using epolls as default hub even when platform does not support it; Thanks to Sergey Shepelev
* wsgi: make Expect 100-continue field-value case-insensitive; Thanks to Julien Kasarherou
* greenthread: optimize _exit_funcs getattr/del dance; Thanks to Alex Kashirin
* New benchmarks runner; Thanks to Sergey Shepelev
* ssl: fix connect to use monotonic clock for timeout; Thanks to Sergey Shepelev
0.24.1
======
* greendns: don't contact nameservers if one entry is returned from hosts file; Thanks to Daniel Alvarez
0.24.0
======
* greendns: Fix infinite loop when UDP source address mismatch; Thanks to Lon Hohberger
* greendns: Fix bad ipv6 comparison; Thanks to Lon Hohberger
* wsgi: Use byte strings on py2 and unicode strings on py3; Thanks to Tim Burke
* pools: put to empty pool would block sometimes; Thanks to Sam Merritt
* greendns: resolving over TCP produced ValueError; Thanks to Jaume Marhuenda
* support.greendns: ImportError when dns.rdtypes was imported before eventlet; Thanks to Jaume Marhuenda
* greendns: full comment lines were not skipped; Thanks to nat-goodspeed
* Drop support for Python3.3; Python2.6 and python-epoll package
* external dependencies for six, monotonic, dnspython; Thanks to nat-goodspeed
* wsgi: Don't strip all Unicode whitespace from headers on py3; Thanks to Tim Burke
0.23.0
======
* green.threading: current_thread() did not see new monkey-patched threads; Thanks to Jake Tesler
* tpool: exception in tpool-ed call leaked memory via backtrace
* wsgi: latin-1 encoding dance for environ[PATH_INFO]
0.22.1
======
* Fixed issue installing excess enum34 on Python3.4+ (rebuild with updated setuptools)
* event: Event.wait() timeout=None argument to be compatible with upstream CPython
* greendns: Treat /etc/hosts entries case-insensitive; Thanks to Ralf Haferkamp
0.22.0
======
* convenience: (SO_REUSEPORT) socket.error is not OSError on Python 2; Thanks to JacoFourie@github
* convenience: SO_REUSEPORT is not available on WSL platform (Linux on Windows)
* convenience: skip SO_REUSEPORT for bind on random port (0)
* dns: reading /etc/hosts raised DeprecationWarning for universal lines on Python 3.4+; Thanks to Chris Kerr
* green.openssl: Drop OpenSSL.rand support; Thanks to Haikel Guemar
* green.subprocess: keep CalledProcessError identity; Thanks to Linbing@github
* greendns: be explicit about expecting bytes from sock.recv; Thanks to Matt Bennett
* greendns: early socket.timeout was breaking IO retry loops
* GreenSocket.accept does not notify_open; Thanks to orishoshan
* patcher: set locked RLocks' owner only when patching existing locks; Thanks to Quan Tian
* patcher: workaround for monotonic "no suitable implementation"; Thanks to Geoffrey Thomas
* queue: empty except was catching too much
* socket: context manager support; Thanks to Miguel Grinberg
* support: update monotonic 1.3 (5c0322dc559bf)
* support: upgrade bundled dnspython to 1.16.0 (22e9de1d7957e) https://github.com/eventlet/eventlet/issues/427
* websocket: fd leak when client did not close connection properly; Thanks to Konstantin Enchant
* websocket: support permessage-deflate extension; Thanks to Costas Christofi and Peter Kovary
* wsgi: close idle connections (also applies to websockets)
* wsgi: deprecated options are one step closer to removal
* wsgi: handle remote connection resets; Thanks to Stefan Nica
0.21.0
======
* New timeout error API: .is_timeout=True on exception object
It's now easy to test if network error is transient and retry is appropriate.
Please spread the word and invite other libraries to support this interface.
* hubs: use monotonic clock by default (bundled package); Thanks to Roman Podoliaka and Victor Stinner
* dns: EVENTLET_NO_GREENDNS option is back, green is still default
* dns: hosts file was consulted after nameservers
* ssl: RecursionError on Python3.6+; Thanks to justdoit0823@github and Gevent developers
* wsgi: log_output=False was not disabling startup and accepted messages
* greenio: Fixed OSError: [WinError 10038] Socket operation on nonsocket
* dns: EAI_NODATA was removed from RFC3493 and FreeBSD
* green.select: fix mark_as_closed() wrong number of args
* green.zmq: socket.{recv,send}_* signatures did not match recent upstream pyzmq
* New feature: Add zipkin tracing to eventlet
* db_pool: proxy Connection.set_isolation_level()
* green.zmq: support RCVTIMEO (receive timeout)
* green.profile: Python3 compatibility; Thanks to Artur Stawiarski
* support: upgrade bundled six to 1.10 (dbfbfc818e3d)
* python3.6: http.client.request support chunked_encoding
0.20.1
======
* dns: try unqualified queries as top level
* test_import_patched_defaults bended to play with pyopenssl>=16.1.0
* Explicit environ flag for importing eventlet.__version__ without ignoring import errors
* Type check Semaphore, GreenPool arguments; Thanks to Matthew D. Pagel
0.20.0
======
* IMPORTANT: removed select.poll() function
* DNS resolving is always green with dnspython bundled in
* greenio: only trampoline when we block
* convenience: listen() sets SO_REUSEPORT when available; Thanks to Zhengwei Gao
* ssl: Fix "TypeError: read() argument 2 must be read-write bytes-like object, not None"
* greenio: _recv_loop behaviour with recv_into on closed sock
* ipv6: getaddrinfo would fail with scope index
* green.zmq: Support {send,recv}_{string,json,pyobj} wrappers
* greendns: Return answers from /etc/hosts despite nameserver errors
* patcher: fixed green existing locks fail (Python3)
* Add DAGPool, a dependency-driven greenthread pool
* wsgi: Unix socket address representation; Thanks to Samuel Merritt
* tpool: isolate internal socket from default timeout; Thanks to Alex Villacís Lasso
* wsgi: only skip Content-Type and Content-Length headers (GH-327)
* wsgi: 400 on blank Content-Length headers (GH-334)
* greenio: makefile related pypy socket ref counting
* ssl: Fix recv_into blocking when reading chunks of data
* websocket: support Gunicorn environ['gunicorn.socket']
0.19.0
======
* ssl: IMPORTANT DoS FIX do_handshake_connect=False in server accept(); Thanks to Garth Mollett
* patcher: patch existing threading locks; Thanks to Alexis Lee
* green.urllib2: missing patched ssl module; Thanks to Collin RM Stocks
* wsgi: environ[headers_raw] tuple of unmodified name: value pairs
* test against modern pyopenssl 16.0.0 for Python 2.7+; Thanks to Victor Stinner
* wsgi: document compatibility with python `logging`
* Minor grammatical improvements and typo fixes to the docs; Thanks to Steven Erenst
0.18.4
======
* wsgi: change TCP_NODELAY to TCP_QUICKACK, ignore socket error when not available
0.18.3
======
* wsgi: Use buffered writes - fixes partial socket.send without custom
writelines(); Github issue #295
* wsgi: TCP_NODELAY enabled by default
0.18.2
======
* wsgi: Fix data loss on partial writes (socket.send); Thanks to Jakub Stasiak
0.18.1
======
* IMPORTANT: do not use Eventlet 0.18.0 and 0.18.1
* patcher: Fix AttributeError in subprocess communicate()
* greenio: Fix "TypeError: an integer is required" in sendto()
0.18.0
======
* IMPORTANT: do not use Eventlet 0.18.0 and 0.18.1
* greenio: Fixed a bug that could cause send() to start an endless loop on
ENOTCONN; Thanks to Seyeong Kim
* wsgi: Fixed UNIX socket address being trimmed in "wsgi starting" log; Thanks
to Ihar Hrachyshka
* ssl: Ported eventlet.green.OpenSSL to Python 3; Thanks to Victor Stinner
* greenio: Made read() support buflen=-1 and added readall() (Python 3);
Thanks to David Szotten
* wsgi: Made the error raised in case of chunk read failures more precise (this
should be backwards compatible as the new exception class,
wsgi.ChunkReadError, is a subclass of ValueError which was being used there
before); Thanks to Samuel Merritt
* greenio: Fixed socket.recv() sometimes returning str instead of bytes on
Python 3; Thanks to Janusz Harkot
* wsgi: Improved request body discarding
* websocket: Fixed TypeError on empty websocket message (Python 3); Thanks to
Fukuchi Daisuke
* subprocess: Fixed universal_newlines support
* wsgi: Output of 0-byte chunks is now suppressed; Thanks to Samuel Merritt
* Improved the documentation; Thanks to Ramakrishnan G, ashutosh-mishra and
Azhar Hussain
* greenio: Changed GreenFileIO.write() (Python 3) to always write all data to
match the behavior on Python 2; Thanks to Victor Stinner
* subprocess: Fixed missing subprocess.mswindows attribute on Python 3.5;
Thanks to Josh VanderLinden
* ssl/monkey patching: Fixed a bug that would cause merely importing eventlet
to monkey patch the ssl module; Thanks to David Szotten
* documentation: Added support for building plain text documentation; thanks
to Levente Polyak
* greenio: Fixed handling blocking IO errors in various GreenSocket methods;
Thanks to Victor Stinner
* greenio: Fixed GreenPipe ignoring the bufsize parameter on Python 2; Thanks
to Phus Lu
* backdoor: Added Unix and IPv6 socket support; Thanks to Eric Urban
Backwards incompatible:
* monkey patching: The following select methods and selector classes are now
removed, instead of being left in their respective modules after patching
even though they are not green (this also fixes HTTPServer.serve_forever()
blocking whole process on Python 3):
* select.poll
* select.epoll
* select.devpoll
* select.kqueue
* select.kevent
* selectors.PollSelector
* selectors.EpollSelector
* selectors.DevpollSelector
* selectors.KqueueSelector
Additionally selectors.DefaultSelector points to a green SelectSelector
* greenio: Fixed send() to no longer behave like sendall() which makes it
consistent with Python standard library and removes a source of very subtle
errors
0.17.4
======
* ssl: incorrect initialization of default context; Thanks to stuart-mclaren
0.17.3
======
* green.thread: Python3.3+ fixes; Thanks to Victor Stinner
* Semaphore.acquire() accepts timeout=-1; Thanks to Victor Stinner
0.17.2
======
* wsgi: Provide python logging compatibility; Thanks to Sean Dague
* greendns: fix premature connection closing in DNS proxy; Thanks to Tim Simmons
* greenio: correct fd close; Thanks to Antonio Cuni and Victor Sergeyev
* green.ssl: HTTPS client Python 2.7.9+ compatibility
* setup: tests.{isolated,manual} polluted top-level packages
0.17.1
======
* greendns: fix dns.name import and Python3 compatibility
0.17
====
* Full Python3 compatibility; Thanks to Jakub Stasiak
* greendns: IPv6 support, improved handling of /etc/hosts; Thanks to Floris Bruynooghe
* tpool: make sure we return results during killall; Thanks to David Szotten
* semaphore: Don't hog a semaphore if someone else is waiting for it; Thanks to Shaun Stanworth
* green.socket: create_connection() was wrapping all exceptions in socket.error; Thanks to Donagh McCabe
* Make sure SSL retries are done using the exact same data buffer; Thanks to Lior Neudorfer
* greenio: shutdown already closed sockets without error; Thanks to David Szotten
0.16.1
======
* Wheel build 0.16.0 incorrectly shipped removed module eventlet.util.
0.16.0
======
* Fix SSL socket wrapping and Python 2.7.9 compatibility; Thanks to Jakub Stasiak
* Fix monkey_patch() on Python 3; Thanks to Victor Stinner
* Fix "maximum recursion depth exceeded in GreenSocket.__del__"; Thanks to Jakub Stasiak
* db_pool: BaseConnectionPool.clear updates .current_size #139; Thanks to Andrey Gubarev
* Fix __str__ method on the TimeoutExpired exception class.; Thanks to Tomaz Muraus
* hubs: drop Twisted support
* Removed deprecated modules: api, most of coros, pool, proc, processes and util
* Improved Python 3 compatibility (including patch by raylu); Thanks to Jakub Stasiak
* Allow more graceful shutdown of wsgi server; Thanks to Stuart McLaren
* wsgi.input: Make send_hundred_continue_headers() a public API; Thanks to Tushar Gohad
* tpool: Windows compatibility, fix ResourceWarning. Thanks to Victor Stinner
* tests: Fix timers not cleaned up on MySQL test skips; Thanks to Corey Wright
0.15.2
======
* greenio: fixed memory leak, introduced in 0.15.1; Thanks to Michael Kerrin, Tushar Gohad
* wsgi: Support optional headers w/ "100 Continue" responses; Thanks to Tushar Gohad
0.15.1
======
* greenio: Fix second simultaneous read (parallel paramiko issue); Thanks to Jan Grant, Michael Kerrin
* db_pool: customizable connection cleanup function; Thanks to Avery Fay
0.15
====
* Python3 compatibility -- **not ready yet**; Thanks to Astrum Kuo, Davanum Srinivas, Jakub Stasiak, Victor Sergeyev
* coros: remove Actor which was deprecated in 2010-01
* saranwrap: remove saranwrap which was deprecated in 2010-02
* PyPy compatibility fixes; Thanks to Dmitriy Kruglyak, Jakub Stasiak
* green.profile: accumulate results between runs; Thanks to Zhang Hua
* greenthread: add .unlink() method; Thanks to Astrum Kuo
* packaging: Generate universal wheels; Thanks to Jakub Stasiak
* queue: Make join not wait if there are no unfinished tasks; Thanks to Jakub Stasiak
* tpool: proxy __enter__, __exit__ fixes Bitbucket-158; Thanks to Eric Urban
* websockets: Add websockets13 support; handle lack of Upgrade header; Thanks to Edward George
* wsgi: capitalize_response_headers option
0.14
====
* wsgi: handle connection socket timeouts; Thanks to Paul Oppenheim
* wsgi: close timed out client connections
* greenio: socket pypy compatibility; Thanks to Alex Gaynor
* wsgi: env['wsgi.input'] was returning 1 byte strings; Thanks to Eric Urban
* green.ssl: fix NameError; Github #17; Thanks to Jakub Stasiak
* websocket: allow "websocket" in lowercase in Upgrade header; Compatibility with current Google Chrome; Thanks to Dmitry Orlov
* wsgi: allow minimum_chunk_size to be overridden on a per request basis; Thanks to David Goetz
* wsgi: configurable socket_timeout
0.13
====
* hubs: kqueue support! Thanks to YAMAMOTO Takashi, Edward George
* greenio: Fix AttributeError on MacOSX; Bitbucket #136; Thanks to Derk Tegeler
* green: subprocess: Fix subprocess.communicate() block on Python 2.7; Thanks to Edward George
* green: select: ensure that hub can .wait() at least once before timeout; Thanks to YAMAMOTO Takashi
* tpool: single request queue to avoid deadlocks; Bitbucket pull request 31,32; Thanks to Edward George
* zmq: pyzmq 13.x compatibility; Thanks to Edward George
* green: subprocess: Popen.wait() accepts new `timeout` kwarg; Python 3.3 and RHEL 6.1 compatibility
* hubs: EVENTLET_HUB can point to external modules; Thanks to Edward George
* semaphore: support timeout for acquire(); Thanks to Justin Patrin
* support: do not clear sys.exc_info if can be preserved (greenlet >= 0.3.2); Thanks to Edward George
* Travis continuous integration; Thanks to Thomas Grainger, Jakub Stasiak
* wsgi: minimum_chunk_size of last Server altered all previous (global variable); Thanks to Jakub Stasiak
* doc: hubs: Point to the correct function in exception message; Thanks to Floris Bruynooghe
0.12
====
* zmq: Fix 100% busy CPU in idle after .bind(PUB) (thanks to Geoff Salmon)
* greenio: Fix socket.settimeout() did not switch back to blocking mode (thanks to Peter Skirko)
* greenio: socket.dup() made excess fcntl syscalls (thanks to Peter Portante)
* setup: Remove legacy --without-greenlet option and unused httplib2 dependency (thanks to Thomas Grainger)
* wsgi: environ[REMOTE_PORT], also available in log_format, log accept event (thanks to Peter Portante)
* tests: Support libzmq 3.0 SNDHWM option (thanks to Geoff Salmon)
0.11
====
* ssl: Fix 100% busy CPU in socket.sendall() (thanks to Raymon Lu)
* zmq: Return linger argument to Socket.close() (thanks to Eric Windisch)
* tests: SSL tests were always skipped due to bug in skip_if_no_ssl decorator
0.10
====
* greenio: Fix relative seek() (thanks to AlanP)
* db_pool: Fix pool.put() TypeError with min_size > 1 (thanks to Jessica Qi)
* greenthread: Prevent infinite recursion with linking to current greenthread (thanks to Edward George)
* zmq: getsockopt(EVENTS) wakes correct threads (thanks to Eric Windisch)
* wsgi: Handle client disconnect while sending response (thanks to Clay Gerrard)
* hubs: Ensure that new hub greenlet is parent of old one (thanks to Edward George)
* os: Fix waitpid() returning (0, 0) (thanks to Vishvananda Ishaya)
* tpool: Add set_num_threads() method to set the number of tpool threads (thanks to David Ibarra)
* threading, zmq: Fix Python 2.5 support (thanks to Floris Bruynooghe)
* tests: tox configuration for all supported Python versions (thanks to Floris Bruynooghe)
* tests: Fix zmq._QueueLock test in Python2.6
* tests: Fix patcher_test on Darwin (/bin/true issue) (thanks to Edward George)
* tests: Skip SSL tests when not available (thanks to Floris Bruynooghe)
* greenio: Remove deprecated GreenPipe.xreadlines() method, was broken anyway
0.9.17
======
* ZeroMQ support calling send and recv from multiple greenthreads (thanks to Geoff Salmon)
* SSL: unwrap() sends data, and so it needs trampolining (#104 thanks to Brandon Rhodes)
* hubs.epolls: Fix imports for exception handler (#123 thanks to Johannes Erdfelt)
* db_pool: Fix .clear() when min_size > 0
* db_pool: Add MySQL's insert_id() method (thanks to Peter Scott)
* db_pool: Close connections after timeout, fix get-after-close race condition with using TpooledConnectionPool (thanks to Peter Scott)
* threading monkey patch fixes (#115 thanks to Johannes Erdfelt)
* pools: Better accounting of current_size in pools.Pool (#91 thanks to Brett Hoerner)
* wsgi: environ['RAW_PATH_INFO'] with request path as received from client (thanks to dweimer)
* wsgi: log_output flag (thanks to Juan Manuel Garcia)
* wsgi: Limit HTTP header size (thanks to Gregory Holt)
* wsgi: Configurable maximum URL length (thanks to Tomas Sedovic)
0.9.16
======
* SO_REUSEADDR now correctly set.
0.9.15
======
* ZeroMQ support without an explicit hub now implemented! Thanks to Zed Shaw for the patch.
* zmq module supports the NOBLOCK flag, thanks to rfk. (#76)
* eventlet.wsgi has a debug flag which can be set to false to not send tracebacks to the client (per redbo's request)
* Recursive GreenPipe madness forestalled by Soren Hansen (#77)
* eventlet.green.ssl no longer busywaits on send()
* EEXIST ignored in epoll hub (#80)
* eventlet.listen's behavior on Windows improved, thanks to Nick Vatamaniuc (#83)
* Timeouts raised within tpool.execute are propagated back to the caller (thanks again to redbo for being the squeaky wheel)
0.9.14
======
* Many fixes to the ZeroMQ hub, which now requires version 2.0.10 or later. Thanks to Ben Ford.
* ZeroMQ hub no longer depends on pollhub, and thus works on Windows (thanks, Alexey Borzenkov)
* Better handling of connect errors on Windows, thanks again to Alexey Borzenkov.
* More-robust Event delivery, thanks to Malcolm Cleaton
* wsgi.py now distinguishes between an empty query string ("") and a non-existent query string (no entry in environ).
* wsgi.py handles ipv6 correctly (thanks, redbo)
* Better behavior in tpool when you give it nonsensical numbers, thanks to R. Tyler for the nonsense. :)
* Fixed importing on 2.5 (#73, thanks to Ruijun Luo)
* Hub doesn't hold on to invalid fds (#74, thanks to Edward George)
* Documentation for eventlet.green.zmq, courtesy of Ben Ford
0.9.13
======
* ZeroMQ hub, and eventlet.green.zmq make supersockets green. Thanks to Ben Ford!
* eventlet.green.MySQLdb added. It's an interface to MySQLdb that uses tpool to make it appear nonblocking
* Greenthread affinity in tpool. Each greenthread is assigned to the same thread when using tpool, making it easier to work with non-thread-safe libraries.
* Eventlet now depends on greenlet 0.3 or later.
* Fixed a hang when using tpool during an import causes another import. Thanks to mikepk for tracking that down.
* Improved websocket draft 76 compliance, thanks to Nick V.
* Rare greenthread.kill() bug fixed, which was probably brought about by a bugfix in greenlet 0.3.
* Easy_installing eventlet should no longer print an ImportError about greenlet
* Support for serving up SSL websockets, thanks to chwagssd for reporting #62
* eventlet.wsgi properly sets 'wsgi.url_scheme' environment variable to 'https', and 'HTTPS' to 'on' if serving over ssl
* Blocking detector uses setitimer on 2.6 or later, allowing for sub-second block detection, thanks to rtyler.
* Blocking detector is documented now, too
* socket.create_connection properly uses dnspython for nonblocking dns. Thanks to rtyler.
* Removed EVENTLET_TPOOL_DNS, nobody liked that. But if you were using it, install dnspython instead. Thanks to pigmej and gholt.
* Removed _main_wrapper from greenthread, thanks to Ambroff adding keyword arguments to switch() in 0.3!
0.9.12
======
* Eventlet no longer uses the Twisted hub if Twisted is imported -- you must call eventlet.hubs.use_hub('twistedr') if you want to use it. This prevents strange race conditions for those who want to use both Twisted and Eventlet separately.
* Removed circular import in twistedr.py
* Added websocket multi-user chat example
* Not using exec() in green modules anymore.
* eventlet.green.socket now contains all attributes of the stdlib socket module, even those that were left out by bugs.
* Eventlet.wsgi doesn't call print anymore, instead uses the logfiles for everything (it used to print exceptions in one place).
* Eventlet.wsgi properly closes the connection when an error is raised
* Better documentation on eventlet.event.Event.send_exception
* Adding websocket.html to tarball so that you can run the examples without checking out the source
0.9.10
======
* Greendns: if dnspython is installed, Eventlet will automatically use it to provide non-blocking DNS queries. Set the environment variable 'EVENTLET_NO_GREENDNS' if you don't want greendns but have dnspython installed.
* Full test suite passes on Python 2.7.
* Tests no longer depend on simplejson for >2.6.
* Potential-bug fixes in patcher (thanks to Schmir, and thanks to Hudson)
* Websockets work with query strings (thanks to mcarter)
* WSGI posthooks that get called after the request completed (thanks to gholt, nice docs, too)
* Blocking detector merged -- use it to detect places where your code is not yielding to the hub for > 1 second.
* tpool.Proxy can wrap callables
* Tweaked Timeout class to do something sensible when True is passed to the constructor
0.9.9
=====
* A fix for monkeypatching on systems with psycopg version 2.0.14.
* Improved support for chunked transfers in wsgi, plus a bunch of tests from schmir (ported from gevent by redbo)
* A fix for the twisted hub from Favo Yang
0.9.8
=====
* Support for psycopg2's asynchronous mode, from Daniele Varrazzo
* websocket module is now part of core Eventlet with 100% unit test coverage thanks to Ben Ford. See its documentation at http://eventlet.net/doc/modules/websocket.html
* Added wrap_ssl convenience method, meaning that we truly no longer need api or util modules.
* Multiple-reader detection code protects against the common mistake of having multiple greenthreads read from the same socket at the same time, which can be overridden if you know what you're doing.
* Cleaner monkey_patch API: the "all" keyword is no longer necessary.
* Pool objects have a more convenient constructor -- no more need to subclass
* amajorek's reimplementation of GreenPipe
* Many bug fixes, major and minor.
0.9.7
=====
* GreenPipe is now a context manager (thanks, quad)
* tpool.Proxy supports iterators properly
* bug fixes in eventlet.green.os (thanks, Benoit)
* much code cleanup from Tavis
* a few more example apps
* multitudinous improvements in Py3k compatibility from amajorek
0.9.6
=====
* new EVENTLET_HUB environment variable allows you to select a hub without code
* improved GreenSocket and GreenPipe compatibility with stdlib
* bugfixes on GreenSocket and GreenPipe objects
* code coverage increased across the board
* Queue resizing
* internal DeprecationWarnings largely eliminated
* tpool is now reentrant (i.e., can call tpool.execute(tpool.execute(foo)))
* more reliable access to unpatched modules reduces some race conditions when monkeypatching
* completely threading-compatible corolocal implementation, plus tests and enthusiastic adoption
* tests stomp on each others' toes less
* performance improvements in timers, hubs, greenpool
* Greenlet-aware profile module courtesy of CCP
* support for select26 module's epoll
* better PEP-8 compliance and import cleanup
* new eventlet.serve convenience function for easy TCP servers
0.9.5
=====
* support psycopg in db_pool
* smart patcher that does the right patching when importing without needing to understand plumbing of patched module
* patcher.monkey_patch() method replacing util.wrap_*
* monkeypatch threading support
* removed api.named
* imported timeout module from gevent, replace exc_after and with_timeout()
* replace call_after with spawn_after; this is so that users don't see the Timer class
* added cancel() method to GreenThread to support the semantic of "abort if not already in the middle of something"
* eventlet.green.os with patched read() and write(), etc
* moved stuff from wrap_pipes_with_coroutine_pipe into green.os
* eventlet.green.subprocess instead of eventlet.processes
* improve patching docs, explaining more about patcher and why you'd use eventlet.green
* better documentation on greenpiles
* deprecate api.py completely
* deprecate util.py completely
* deprecate saranwrap
* performance improvements in the hubs
* much better documentation overall
* new convenience functions: eventlet.connect and eventlet.listen. Thanks, Sergey!
0.9.4
=====
* Deprecated coros.Queue and coros.Channel (use queue.Queue instead)
* Added putting and getting methods to queue.Queue.
* Added eventlet.green.Queue which is a greened clone of stdlib Queue, along with stdlib tests.
* Changed __init__.py so that the version number is readable even if greenlet's not installed.
* Bugfixes in wsgi, greenpool
0.9.3
=====
* Moved primary api module to __init__ from api. It shouldn't be necessary to import eventlet.api anymore; import eventlet should do the same job.
* Proc module deprecated in favor of greenthread
* New module greenthread, with new class GreenThread.
* New GreenPool class that replaces pool.Pool.
* Deprecated proc module (use greenthread module instead)
* tpooled gethostbyname is configurable via environment variable EVENTLET_TPOOL_GETHOSTBYNAME
* Removed greenio.Green_fileobject and refactored the code therein to be more efficient. Only call makefile() on sockets now; makeGreenFile() is deprecated. The main loss here is that of the readuntil method. Also, Green_fileobjects used to be auto-flushing; flush() must be called explicitly now.
* Added epoll support
* Improved documentation across the board.
* New queue module, API-compatible with stdlib Queue
* New debug module, used for enabling verbosity within Eventlet that can help debug applications or Eventlet itself.
* Bugfixes in tpool, green.select, patcher
* Deprecated coros.execute (use eventlet.spawn instead)
* Deprecated coros.semaphore (use semaphore.Semaphore or semaphore.BoundedSemaphore instead)
* Moved coros.BoundedSemaphore to semaphore.BoundedSemaphore
* Moved coros.Semaphore to semaphore.Semaphore
* Moved coros.event to event.Event
* Deprecated api.tcp_listener, api.connect_tcp, api.ssl_listener
* Moved get_hub, use_hub, get_default_hub from eventlet.api to eventlet.hubs
* Renamed libevent hub to pyevent.
* Removed previously-deprecated features tcp_server, GreenSSL, erpc, and trap_errors.
* Removed saranwrap as an option for making db connections nonblocking in db_pool.
0.9.2
=====
* Bugfix for wsgi.py where it was improperly expecting the environ variable to be a constant when passed to the application.
* Tpool.py now passes its tests on Windows.
* Fixed minor performance issue in wsgi.
0.9.1
=====
* PyOpenSSL is no longer required for Python 2.6: use the eventlet.green.ssl module. 2.5 and 2.4 still require PyOpenSSL.
* Cleaned up the eventlet.green packages and their associated tests, this should result in fewer version-dependent bugs with these modules.
* PyOpenSSL is now fully wrapped in eventlet.green.OpenSSL; using it is therefore more consistent with using other green modules.
* Documentation on using SSL added.
* New green modules: asyncore, asynchat, SimpleHTTPServer, CGIHTTPServer, ftplib.
* Fuller thread/threading compatibility: patching threadlocal with corolocal so coroutines behave even more like threads.
* Improved Windows compatibility for tpool.py
* With-statement compatibility for pools.Pool objects.
* Refactored copyrights in the files, added LICENSE and AUTHORS files.
* Added support for logging x-forwarded-for header in wsgi.
* api.tcp_server is now deprecated, will be removed in a future release.
* Added instructions on how to generate coverage reports to the documentation.
* Renamed GreenFile to Green_fileobject, to better reflect its purpose.
* Deprecated erpc method in tpool.py
* Bug fixes in: wsgi.py, twistedr.py, poll.py, greenio.py, util.py, select.py, processes.py, selects.py
0.9.0
=====
* Full-duplex sockets (simultaneous readers and writers in the same process).
* Remove modules that distract from the core mission of making it straightforward to write event-driven networking apps:
httpd, httpc, channel, greenlib, httpdate, jsonhttp, logutil
* Removed test dependency on sqlite, using nose instead.
* Marked known-broken tests using nose's mechanism (most of these are not broken but are simply run in the incorrect context, such as threading-related tests that are incompatible with the libevent hub).
* Remove copied code from python standard libs (in tests).
* Added eventlet.patcher which can be used to import "greened" modules.
0.8.16
======
* GreenSSLObject properly masks ZeroReturnErrors with an empty read; with unit test.
* Fixed 2.6 SSL compatibility issue.
0.8.15
======
* GreenSSL object no longer converts ZeroReturnErrors into empty reads, because that is more compatible with the underlying SSLConnection object.
* Fixed issue caused by SIGCHLD handler in processes.py
* Stopped supporting string exceptions in saranwrap and fixed a few test failures.
0.8.14
======
* Fixed some more Windows compatibility problems, resolving EVT-37 :
http://jira.secondlife.com/browse/EVT-37
* waiting() method on Pool class, which was lost when the Pool implementation
replaced CoroutinePool.
0.8.13
======
* 2.6 SSL compatibility patch by Marcus Cavanaugh.
* Added greenlet and pyopenssl as dependencies in setup.py.
0.8.12
======
* The ability to resize() pools of coroutines, which was lost when the
Pool implementation replaced CoroutinePool.
* Fixed Cesar's issue with SSL connections, and furthermore did a
complete overhaul of SSL handling in eventlet so that it's much closer
to the behavior of the built-in libraries. In particular, users of
GreenSSL sockets must now call shutdown() before close(), exactly
like SSL.Connection objects.
* A small patch that makes Eventlet work on Windows. This is the first
release of Eventlet that works on Windows.
0.8.11
======
Eventlet can now run on top of twisted reactor. Twisted-based hub is enabled automatically if
twisted.internet.reactor is imported. It is also possible to "embed" eventlet into a twisted
application via eventlet.twistedutil.join_reactor. See the examples for details.
A new package, eventlet.twistedutil, is added that makes integration of twisted and eventlet
easier. It has a block_on function that allows waiting for a Deferred to fire, and it wraps
twisted's Protocol in a synchronous interface. This is similar to and is inspired by Christopher
Armstrong's corotwine library. Thanks to Dan Pascu for reviewing the package.
Another new package, eventlet.green, was added to provide some of the standard modules
that are fixed not to block other greenlets. This is an alternative to monkey-patching
the socket, which is impossible to do if you are running twisted reactor.
The package includes socket, httplib, urllib2.
Much of the core functionality has been refactored and cleaned up, including the removal
of eventlet.greenlib. This means that it is now possible to use plain greenlets without
modification in eventlet, and the subclasses of greenlet instead of the old
eventlet.greenlib.GreenletContext. Calling eventlet.api.get_hub().switch() now checks to
see whether the current greenlet has a "switch_out" method and calls it if so, providing the
same functionality that the GreenletContext.swap_out used to. The swap_in behavior can be
duplicated by overriding the switch method, and the finalize functionality can be duplicated
by having a try: finally: block around the greenlet's main implementation. The eventlet.backdoor
module has been ported to this new scheme, although its signature had to change slightly so
existing code that used the backdoor will have to be modified.
A number of bugs related to improper scheduling of switch calls have been fixed.
The fixed functions and classes include api.trampoline, api.sleep, coros.event,
coros.semaphore, coros.queue.
Many methods of greenio.GreenSocket were fixed to make its behavior more like that of a regular
socket. Thanks to Marcin Bachry for fixing GreenSocket.dup to preserve the timeout.
Added proc module which provides an easy way to subscribe to coroutine's results. This makes
it easy to wait for a single greenlet or for a set of greenlets to complete.
wsgi.py now supports chunked transfer requests (patch by Mike Barton)
The following modules were deprecated or removed because they were broken:
hubs.nginx, hubs.libev, support.pycurls, support.twisteds, cancel method of coros.event class
The following classes are still present but will be removed in the future version:
- channel.channel (use coros.Channel)
- coros.CoroutinePool (use pool.Pool)
saranwrap.py now correctly closes the child process when the referring object is deleted,
received some fixes to its detection of child process death, now correctly deals with the in
keyword, and it is now possible to use coroutines in a non-blocking fashion in the child process.
Time-based expiry added to db_pool. This adds the ability to expire connections both by idleness
and also by total time open. There is also a connection timeout option.
A small bug in httpd's error method was fixed.
Python 2.3 is no longer supported.
A number of tests were added along with a script to run all of them for all the configurations.
The script generates an html page with the results.
Thanks to Brian Brunswick for investigation of popen4 badness (eventlet.process)
Thanks to Marcus Cavanaugh for pointing out some coros.queue(0) bugs.
The twisted integration as well as many other improvements were funded by AG Projects (http://ag-projects.com), thanks!
0.8.x
=====
Fix a CPU leak that would cause the poll hub to consume 100% CPU in certain conditions, for example the echoserver example. (Donovan Preston)
Fix the libev hub to match libev's callback signature. (Patch by grugq)
Add a backlog argument to api.tcp_listener (Patch by grugq)
0.7.x
=====
Fix a major memory leak when using the libevent or libev hubs. Timers were not being removed from the hub after they fired. (Thanks Agusto Becciu and the grugq). Also, make it possible to call wrap_socket_with_coroutine_socket without using the threadpool to make dns operations non-blocking (Thanks the grugq).
It's now possible to use eventlet's SSL client to talk to eventlet's SSL server. (Thanks to Ryan Williams)
Fixed a major CPU leak when using select hub. When adding a descriptor to the hub, entries were made in all three dictionaries, readers, writers, and exc, even if the callback is None. Thus every fd would be passed into all three lists when calling select regardless of whether there was a callback for that event or not. When reading the next request out of a keepalive socket, the socket would come back as ready for writing, the hub would notice the callback is None and ignore it, and then loop as fast as possible consuming CPU.
0.6.x
=====
Fixes some long-standing bugs where sometimes failures in accept() or connect() would cause the coroutine that was waiting to be double-resumed, most often resulting in SwitchingToDeadGreenlet exceptions as well as weird tuple-unpacking exceptions in the CoroutinePool main loop.
0.6.1: Added eventlet.tpool.killall. Blocks until all of the threadpool threads have been told to exit and join()ed. Meant to be used to clean up the threadpool on exit or if calling execv. Used by Spawning.
0.5.x
=====
"The Pycon 2008 Refactor": The first release which incorporates libevent support. Also comes with significant refactoring and code cleanup, especially to the eventlet.wsgi http server. Docstring coverage is much higher and there is new extensive documentation: http://wiki.secondlife.com/wiki/Eventlet/Documentation
The point releases of 0.5.x fixed some bugs in the wsgi server, most notably handling of Transfer-Encoding: chunked; previously, it would happily send chunked encoding to clients which asked for HTTP/1.0, which isn't legal.
0.2
=====
Initial re-release of forked linden branch.
0707010000000E000081A400000000000000000000000166E1887C00000DDF000000000000000000000000000000000000003200000000eventlet-0.37.0+git.1726056572.8637820/README.rst Warning
=======
**New usages of eventlet are now heavily discouraged! Please read the
following.**
Eventlet was created almost 18 years ago, at a time when async
features were absent from the CPython stdlib. With time, eventlet evolved and
CPython did too, but for several years the maintenance activity of eventlet
decreased, leading to a growing gap between eventlet and the CPython
implementation.
This gap is now too high and can lead you to unexpected side effects and bugs
in your applications.
Eventlet now follows a new maintenance policy. **Only maintenance for
stability and bug fixing** will be provided. **No new features will be
accepted**, except those related to the asyncio migration. **Usages in new
projects are discouraged**. **Our goal is to plan the retirement of eventlet**
and to give you ways to move away from eventlet.
If you are looking for a library to manage async network programming,
and if you do not yet use eventlet, then, we encourage you to use `asyncio`_,
which is the official async library of the CPython stdlib.
If you already use eventlet, we hope to enable migration to asyncio for some use
cases; see `Migrating off of Eventlet`_. Only new features related to the migration
solution will be accepted.
If you have questions concerning maintenance goals or concerning
the migration do not hesitate to `open a new issue`_, we will be happy to
answer them.
.. _asyncio: https://docs.python.org/3/library/asyncio.html
.. _open a new issue: https://github.com/eventlet/eventlet/issues/new
.. _Migrating off of Eventlet: https://eventlet.readthedocs.io/en/latest/asyncio/migration.html#migration-guide
Eventlet
========
.. image:: https://img.shields.io/pypi/v/eventlet
:target: https://pypi.org/project/eventlet/
.. image:: https://img.shields.io/github/actions/workflow/status/eventlet/eventlet/test.yaml?branch=master
:target: https://github.com/eventlet/eventlet/actions?query=workflow%3Atest+branch%3Amaster
.. image:: https://codecov.io/gh/eventlet/eventlet/branch/master/graph/badge.svg
:target: https://codecov.io/gh/eventlet/eventlet
Eventlet is a concurrent networking library for Python that allows you to change how you run your code, not how you write it.
It uses epoll or libevent for highly scalable non-blocking I/O. Coroutines ensure that the developer uses a blocking style of programming that is similar to threading, but provide the benefits of non-blocking I/O. The event dispatch is implicit, which means you can easily use Eventlet from the Python interpreter, or as a small part of a larger application.
It's easy to get started using Eventlet, and easy to convert existing
applications to use it. Start off by looking at the `examples`_,
`common design patterns`_, and the list of `basic API primitives`_.
.. _examples: https://eventlet.readthedocs.io/en/latest/examples.html
.. _common design patterns: https://eventlet.readthedocs.io/en/latest/design_patterns.html
.. _basic API primitives: https://eventlet.readthedocs.io/en/latest/basic_usage.html
Getting Eventlet
================
The easiest way to get Eventlet is to use pip::
pip install -U eventlet
To install latest development version once::
pip install -U https://github.com/eventlet/eventlet/archive/master.zip
Building the Docs Locally
=========================
To build a complete set of HTML documentation::
tox -e docs
The built html files can be found in doc/build/html afterward.
Supported Python versions
=========================
Python 3.7-3.12 are currently supported.
0707010000000F000081A400000000000000000000000166E1887C00000394000000000000000000000000000000000000003300000000eventlet-0.37.0+git.1726056572.8637820/SECURITY.md # Security Policy
## Supported Versions
master branch and latest release get priority support. You should expect all known problems fixed in master.
All other released versions receive security updates per request.
If you use some old version and can not upgrade for any or no reason, ask for security update release, most likely you will get it.
## Reporting a Vulnerability
Contact current maintainers. At 2021-03: temotor@gmail.com or https://t.me/temotor
If that doesn't work, open a GitHub issue asking for a private communication channel.
This is volunteer maintained project, all issues are processed on best effort basis, no deadlines promised. Of course, security vulnerabilities get priority over regular issues.
You can expect fame in history or maybe you prefer anonymity - say what you prefer.
Thank you for responsible handling of security problems. Your attention and effort are appreciated.
07070100000010000041ED00000000000000000000000266E1887C00000000000000000000000000000000000000000000003200000000eventlet-0.37.0+git.1726056572.8637820/benchmarks 07070100000011000081A400000000000000000000000166E1887C00001B3D000000000000000000000000000000000000003E00000000eventlet-0.37.0+git.1726056572.8637820/benchmarks/__init__.py import argparse
import gc
import importlib
import inspect
import math
import random
import re
import sys
import timeit
import eventlet
# legacy, TODO convert context/localhost_socket benchmarks to new way
def measure_best(repeat, iters,
                 common_setup='pass',
                 common_cleanup='pass',
                 *funcs):
    """Benchmark each callable in *funcs* and return {func: best_seconds}.

    Each function is timed `repeat` times for `iters` iterations per round,
    in a freshly shuffled order each round (to average out ordering effects).
    The minimum observed time per function is returned.

    :param repeat: number of timing rounds per function
    :param iters: iterations per timeit() call
    :param common_setup: setup statement (string) or callable for timeit.Timer
    :param common_cleanup: callable invoked after each round; the string
        default 'pass' is kept for backward compatibility and is skipped
    :param funcs: callables to benchmark
    :return: dict mapping each function to its best (lowest) time in seconds
    """
    funcs = list(funcs)
    results = {f: [] for f in funcs}
    for _ in range(repeat):
        random.shuffle(funcs)
        for func in funcs:
            gc.collect()
            t = timeit.Timer(func, setup=common_setup)
            results[func].append(t.timeit(iters))
        # BUG FIX: the default cleanup is the string 'pass', which is not
        # callable; previously calling it raised TypeError.  Only invoke
        # real callables.
        if callable(common_cleanup):
            common_cleanup()
    best_results = {}
    for func, times in results.items():
        best_results[func] = min(times)
    return best_results
class Benchmark:
    """Result record for a single benchmark, printable in benchcmp format."""

    func = None
    name = ''
    iters = 0
    ns_per_op = 0
    allocs_per_op = 0
    mb_per_s = 0

    def __init__(self, **kwargs):
        # Only attributes declared on the class may be overridden.
        for key, value in kwargs.items():
            if not hasattr(self, key):
                raise AttributeError(key)
            setattr(self, key, value)

    def __str__(self):
        public = ('{}={}'.format(k, v)
                  for k, v in self.__dict__.items()
                  if not k.startswith('_'))
        return 'Benchmark<{}>'.format(', '.join(public))

    __repr__ = __str__

    def format_result(self, name_pad_to=64):
        # Output format understood by golang.org/x/tools/cmd/benchcmp.
        padding = ' ' * (name_pad_to + 1 - len(self.name))
        return "Benchmark_{b.name}{pad}\t{b.iters}\t{b.ns_per_op} ns/op".format(
            b=self, pad=padding)

    def run(self, repeat=5):
        """Execute self.func, recording the best ns/op over `repeat` rounds."""
        # Zero-iteration run estimates the timing-harness overhead.
        wrapper_time = _run_timeit(self.func, 0)
        times = []
        for _ in range(repeat):
            elapsed = _run_timeit(self.func, self.iters)
            if elapsed == 0.0:
                raise Exception('{} time=0'.format(repr(self)))
            times.append(elapsed)
        best_time = min(times) - wrapper_time
        self.ns_per_op = int((best_time * 1e9) / self.iters)
def _run_timeit(func, number):
    """Time `number` iterations of `func`, honoring its benchmark manager.

    If the function was decorated via benchmarks.configure(manager=...),
    the manager is entered with the iteration count and its yielded value
    is passed to `func` on every call.
    """
    # common setup
    gc.collect()
    manager = getattr(func, '_benchmark_manager', None)
    try:
        # TODO collect allocations count, memory usage
        # TODO collect custom MB/sec metric reported by benchmark
        if manager is None:
            return timeit.Timer(func).timeit(number=number)
        with manager(number) as ctx:
            return timeit.Timer(lambda: func(ctx)).timeit(number=number)
    finally:
        # common cleanup: give pending eventlet callbacks a chance to run
        eventlet.sleep(0.01)
def optimal_iters(func, target_time):
    '''Find optimal number of iterations to run func closely >= target_time.

    Honors attributes set by benchmarks.configure():
    _benchmark_max_iters caps the result, _benchmark_scale_factor forces
    geometric growth instead of the default heuristic.
    '''
    iters = 1
    target_time = float(target_time)
    max_iters = int(getattr(func, '_benchmark_max_iters', 0))
    # TODO automatically detect non-linear time growth
    scale_factor = getattr(func, '_benchmark_scale_factor', 0.0)
    # At most 10 probe rounds; each round either returns or grows `iters`.
    for _ in range(10):
        if max_iters and iters > max_iters:
            return max_iters
        # print('try iters={iters}'.format(**locals()))
        t = _run_timeit(func, number=iters)
        # print('... t={t}'.format(**locals()))
        if t >= target_time:
            return iters
        if scale_factor:
            iters *= scale_factor
            continue
        # following assumes and works well for linear complexity target functions
        if t < (target_time / 2):
            # roughly target half optimal time, ensure iterations keep increasing
            iters = iters * (target_time / t / 2) + 1
            # round up to nearest power of 10
            iters = int(10 ** math.ceil(math.log10(iters)))
        elif t < target_time:
            # half/double dance is less prone to overshooting iterations
            iters *= 2
    raise Exception('could not find optimal iterations for time={} func={}'.format(target_time, repr(func)))
def collect(filter_fun):
    """Import the benchmark modules and return Benchmark objects whose
    prefix-stripped name satisfies `filter_fun`."""
    # running `python benchmarks/__init__.py` or `python -m benchmarks`
    # puts .../eventlet/benchmarks at top of sys.path, fix it to project root
    if sys.path[0].endswith('/benchmarks'):
        bench_path = sys.path.pop(0)
        project_root = bench_path.rsplit('/', 1)[0]
        sys.path.insert(0, project_root)

    common_prefix = 'benchmark_'
    result = []
    # TODO step 1: put all toplevel benchmarking code under `if __name__ == '__main__'`
    # TODO step 2: auto import benchmarks/*.py, remove whitelist below
    # TODO step 3: convert existing benchmarks
    for mod_name in ('hub_timers', 'spawn'):
        module = importlib.import_module('benchmarks.' + mod_name)
        for attr_name, obj in inspect.getmembers(module):
            if not (inspect.isfunction(obj) and attr_name.startswith(common_prefix)):
                continue
            useful_name = attr_name[len(common_prefix):]
            if filter_fun(useful_name):
                result.append(Benchmark(name=useful_name, func=obj))
    return result
def noop(*a, **kw):
    """Do nothing; accepts and ignores any arguments."""
    return None
def configure(manager=None, scale_factor=0.0, max_iters=0):
    """Decorator attaching benchmark tuning knobs as function attributes.

    :param manager: context-manager factory taking the iteration count and
        yielding a value passed to the benchmark on each call
    :param scale_factor: geometric growth factor for iteration auto-scaling
        (0.0 selects the default strategy)
    :param max_iters: hard cap on iterations (0 means uncapped)
    """
    def apply(func):
        settings = {
            '_benchmark_manager': manager,
            '_benchmark_scale_factor': scale_factor,
            '_benchmark_max_iters': max_iters,
        }
        for attr, value in settings.items():
            setattr(func, attr, value)
        return func
    return apply
def main():
    """CLI entry point: parse args, collect benchmarks, run and report.

    Exits with status 1 when -filter matches no benchmark.
    """
    cmdline = argparse.ArgumentParser(description='Run benchmarks')
    cmdline.add_argument('-autotime', default=3.0, type=float, metavar='seconds',
                         help='''autoscale iterations close to this time per benchmark,
                         in seconds (default: %(default).1f)''')
    cmdline.add_argument('-collect', default=False, action='store_true',
                         help='stop after collecting, useful for debugging this tool')
    cmdline.add_argument('-filter', default='', metavar='regex',
                         help='process benchmarks matching regex (default: all)')
    cmdline.add_argument('-iters', default=None, type=int, metavar='int',
                         help='force this number of iterations (default: auto)')
    cmdline.add_argument('-repeat', default=5, type=int, metavar='int',
                         help='repeat each benchmark, report best result (default: %(default)d)')
    args = cmdline.parse_args()

    filter_re = re.compile(args.filter)
    bs = collect(filter_re.search)
    if args.filter and not bs:
        # TODO stderr
        print('error: no benchmarks matched by filter "{}"'.format(args.filter))
        sys.exit(1)
    if args.collect:
        bs.sort(key=lambda b: b.name)
        print('\n'.join(b.name for b in bs))
        return
    if not bs:
        raise Exception('no benchmarks to run')

    # execute in random order
    random.shuffle(bs)
    for b in bs:
        b.iters = args.iters or optimal_iters(b.func, target_time=args.autotime)
        # BUG FIX: -repeat was parsed but never used; previously run()
        # always used its default of 5 repeats.
        b.run(repeat=args.repeat)

    # print results in alphabetic order
    max_name_len = max(len(b.name) for b in bs)
    bs.sort(key=lambda b: b.name)
    for b in bs:
        print(b.format_result(name_pad_to=max_name_len))
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Exit with an error status on Ctrl-C instead of a traceback.
        sys.exit(1)
07070100000012000081A400000000000000000000000166E1887C000000A3000000000000000000000000000000000000003E00000000eventlet-0.37.0+git.1726056572.8637820/benchmarks/__main__.py # this file is required to support `python -m benchmarks` syntax
import sys

import benchmarks

try:
    benchmarks.main()
except KeyboardInterrupt:
    # Exit with an error status on Ctrl-C instead of a traceback.
    sys.exit(1)
07070100000013000081A400000000000000000000000166E1887C000008C1000000000000000000000000000000000000003D00000000eventlet-0.37.0+git.1726056572.8637820/benchmarks/context.py """Test context switching performance of threading and eventlet"""
import threading
import time
import eventlet
import eventlet.hubs
CONTEXT_SWITCHES = 100000
def run(event, wait_event):
    """Ping-pong worker: wait for our event, reset it, fire the peer's.

    Performs CONTEXT_SWITCHES + 1 rounds (the original counted with `<=`).
    """
    for _ in range(CONTEXT_SWITCHES + 1):
        wait_event.wait()
        wait_event.reset()
        event.send()
def test_eventlet():
    """Run the ping-pong benchmark on two eventlet greenthreads."""
    ping = eventlet.event.Event()
    pong = eventlet.event.Event()
    # Pre-fire one event so the first worker can start immediately.
    ping.send()
    workers = [eventlet.spawn(run, ping, pong),
               eventlet.spawn(run, pong, ping)]
    for worker in workers:
        worker.wait()
class BenchThread(threading.Thread):
    """OS thread that ping-pongs with a peer via a pair of threading.Events."""

    def __init__(self, event, wait_event):
        threading.Thread.__init__(self)
        self.counter = 0
        self.event = event
        self.wait_event = wait_event

    def run(self):
        # `<=` gives CONTEXT_SWITCHES + 1 rounds, matching the green version.
        while True:
            if self.counter > CONTEXT_SWITCHES:
                break
            self.wait_event.wait()
            self.wait_event.clear()
            self.counter += 1
            self.event.set()
def test_thread():
    """Run the ping-pong benchmark on two OS threads."""
    ev_a = threading.Event()
    ev_b = threading.Event()
    # Pre-set one event so the first thread can start immediately.
    ev_a.set()
    workers = [BenchThread(ev_a, ev_b), BenchThread(ev_b, ev_a)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
print("Testing with %d context switches" % CONTEXT_SWITCHES)
start = time.time()
test_thread()
print("threading: %.02f seconds" % (time.time() - start))

# Try each hub in turn; a hub may be unavailable on this platform.
# BUG FIX: a bare `except:` was used here, which also swallowed
# KeyboardInterrupt/SystemExit and reported them as "hub unavailable";
# catching Exception keeps the best-effort behavior without masking Ctrl-C.
for label, hub_module in (
        ("epoll", "eventlet.hubs.epolls"),
        ("kqueue", "eventlet.hubs.kqueue"),
        ("poll", "eventlet.hubs.poll"),
        ("select", "eventlet.hubs.selects"),
):
    try:
        eventlet.hubs.use_hub(hub_module)
        start = time.time()
        test_eventlet()
        print("%s: %.02f seconds" % (label, time.time() - start))
    except Exception:
        print("%s hub unavailable" % label)
07070100000014000081A400000000000000000000000166E1887C000002A1000000000000000000000000000000000000004000000000eventlet-0.37.0+git.1726056572.8637820/benchmarks/hub_timers.py '''Benchmark timer adds & expires on hubs.hub.BaseHub
'''
import contextlib
import random
import benchmarks
from eventlet.hubs import timer, get_hub
# Shared sink for fired timer callbacks; reset by setup() before each run.
l = []
hub = get_hub()
def work(n):
    # Timer callback: record the timeout value that fired.
    l.append(n)
@contextlib.contextmanager
def setup(iters):
    """Reset the result sink and yield `iters` random timeouts in [0, 10)."""
    del l[:]
    yield [random.uniform(0, 10) for _ in range(iters)]
@benchmarks.configure(manager=setup, scale_factor=3)
def benchmark_hub_timers(timeouts):
    """Schedule one hub timer per timeout, then fire and sweep them all."""
    scheduled = []
    for delay in timeouts:
        tick = timer.Timer(delay, work, delay)
        tick.schedule()
        scheduled.append(tick)
    hub.prepare_timers()
    # 11 exceeds the maximum random timeout (10), so every timer fires.
    hub.fire_timers(hub.clock() + 11)
    hub.prepare_timers()
07070100000015000081A400000000000000000000000166E1887C00000D97000000000000000000000000000000000000004600000000eventlet-0.37.0+git.1726056572.8637820/benchmarks/localhost_socket.py """Benchmark evaluating eventlet's performance at speaking to itself over a localhost socket."""
import time
import benchmarks
BYTES = 1000
SIZE = 1
CONCURRENCY = 50
TRIES = 5
def reader(sock):
    """Drain BYTES bytes from `sock`, reading at most SIZE bytes at a time."""
    remaining = BYTES
    while remaining > 0:
        chunk = sock.recv(min(remaining, SIZE))
        remaining -= len(chunk)
def writer(addr, socket_impl):
    """Connect to `addr` and push BYTES bytes in chunks of roughly SIZE.

    :param addr: (host, port) tuple to connect to
    :param socket_impl: socket class/factory (green or standard)
    """
    sock = socket_impl(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(addr)
    sent = 0
    while sent < BYTES:
        # BUG FIX: `SIZE / 2` is a float on Python 3, and `'xy' * float`
        # raises TypeError; sockets also require bytes, not str.  Use
        # integer division and a bytes literal.
        d = b'xy' * (max(min(SIZE // 2, BYTES - sent), 1))
        sock.sendall(d)
        sent += len(d)
def green_accepter(server_sock, pool):
    """Accept CONCURRENCY connections, spawning a green reader for each."""
    for _ in range(CONCURRENCY):
        conn, _addr = server_sock.accept()
        pool.spawn_n(reader, conn)
def heavy_accepter(server_sock, pool):
    """Accept CONCURRENCY connections, starting an OS reader thread for each."""
    for _ in range(CONCURRENCY):
        conn, _addr = server_sock.accept()
        worker = threading.Thread(None, reader, "reader thread", (conn,))
        worker.start()
        pool.append(worker)
import eventlet.green.socket
import eventlet
from eventlet import debug

# Surface exceptions raised inside the hub instead of silently dropping them.
debug.hub_exceptions(True)
def launch_green_threads():
    """Run the loopback benchmark using eventlet greenthreads."""
    # One slot for the accepter plus a reader and a writer per connection.
    pool = eventlet.GreenPool(CONCURRENCY * 2 + 1)
    server_sock = eventlet.green.socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_sock.bind(('localhost', 0))
    server_sock.listen(50)
    addr = ('localhost', server_sock.getsockname()[1])
    pool.spawn_n(green_accepter, server_sock, pool)
    for _ in range(CONCURRENCY):
        pool.spawn_n(writer, addr, eventlet.green.socket.socket)
    pool.waitall()
import threading
import socket
def launch_heavy_threads():
    """Run the loopback benchmark using OS threads."""
    workers = []
    server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_sock.bind(('localhost', 0))
    server_sock.listen(50)
    addr = ('localhost', server_sock.getsockname()[1])
    accepter_thread = threading.Thread(
        None, heavy_accepter, "accepter thread", (server_sock, workers))
    accepter_thread.start()
    workers.append(accepter_thread)
    for _ in range(CONCURRENCY):
        client_thread = threading.Thread(None, writer, "writer thread", (addr, socket.socket))
        client_thread.start()
        workers.append(client_thread)
    for worker in workers:
        worker.join()
if __name__ == "__main__":
    import optparse
    parser = optparse.OptionParser()
    parser.add_option('--compare-threading', action='store_true', dest='threading', default=False)
    parser.add_option('-b', '--bytes', type='int', dest='bytes',
                      default=BYTES)
    parser.add_option('-s', '--size', type='int', dest='size',
                      default=SIZE)
    parser.add_option('-c', '--concurrency', type='int', dest='concurrency',
                      default=CONCURRENCY)
    parser.add_option('-t', '--tries', type='int', dest='tries',
                      default=TRIES)

    opts, args = parser.parse_args()
    # Rebind the module-level knobs so reader/writer pick up the CLI values.
    BYTES = opts.bytes
    SIZE = opts.size
    CONCURRENCY = opts.concurrency
    TRIES = opts.tries

    funcs = [launch_green_threads]
    if opts.threading:
        funcs = [launch_green_threads, launch_heavy_threads]
    results = benchmarks.measure_best(TRIES, 3,
                                      lambda: None, lambda: None,
                                      *funcs)
    print("green:", results[launch_green_threads])
    if opts.threading:
        print("threads:", results[launch_heavy_threads])
        # Relative difference of green vs. threads, as a percentage.
        print("%", (results[launch_green_threads] - results[launch_heavy_threads]
                    ) / results[launch_heavy_threads] * 100)
07070100000016000081A400000000000000000000000166E1887C00000555000000000000000000000000000000000000003B00000000eventlet-0.37.0+git.1726056572.8637820/benchmarks/spawn.py import contextlib
import eventlet
import benchmarks
def dummy(i=None):
    # Identity helper used as the spawned benchmark workload.
    return i
def linked(gt, arg):
    # Link callback: receives the finished greenthread and the link argument.
    return arg
def benchmark_sleep():
    # Measure a bare cooperative yield through the hub.
    eventlet.sleep()
def benchmark_spawn_link1():
    """Spawn, attach one link callback, then wait for completion."""
    gt = eventlet.spawn(dummy)
    gt.link(linked, 1)
    gt.wait()
def benchmark_spawn_link5():
    """Spawn, attach five link callbacks, then wait for completion."""
    gt = eventlet.spawn(dummy)
    for arg in (1, 2, 3, 4, 5):
        gt.link(linked, arg)
    gt.wait()
def benchmark_spawn_link5_unlink3():
    """Spawn, attach five links, detach one, then wait for completion."""
    gt = eventlet.spawn(dummy)
    for arg in (1, 2, 3, 4, 5):
        gt.link(linked, arg)
    gt.unlink(linked, 3)
    gt.wait()
@benchmarks.configure(max_iters=1e5)
def benchmark_spawn_nowait():
    # Spawn only; completion is not awaited, so this measures spawn cost.
    eventlet.spawn(dummy, 1)
def benchmark_spawn():
    # Spawn and immediately wait: full greenthread lifecycle.
    eventlet.spawn(dummy, 1).wait()
@benchmarks.configure(max_iters=1e5)
def benchmark_spawn_n():
    # Fire-and-forget spawn with a positional argument.
    eventlet.spawn_n(dummy, 1)
@benchmarks.configure(max_iters=1e5)
def benchmark_spawn_n_kw():
    # Fire-and-forget spawn with a keyword argument.
    eventlet.spawn_n(dummy, i=1)
@contextlib.contextmanager
def pool_setup(iters):
    # Provide a pool big enough for every iteration; drain it afterwards.
    pool = eventlet.GreenPool(iters)
    yield pool
    pool.waitall()
@benchmarks.configure(manager=pool_setup)
def benchmark_pool_spawn(pool):
    # Spawn into the shared pool provided by pool_setup.
    pool.spawn(dummy, 1)
@benchmarks.configure(manager=pool_setup, max_iters=1e5)
def benchmark_pool_spawn_n(pool):
    # Fire-and-forget spawn into the shared pool provided by pool_setup.
    pool.spawn_n(dummy, 1)
07070100000017000041ED00000000000000000000000266E1887C00000000000000000000000000000000000000000000002B00000000eventlet-0.37.0+git.1726056572.8637820/bin 07070100000018000081ED00000000000000000000000166E1887C00000735000000000000000000000000000000000000003900000000eventlet-0.37.0+git.1726056572.8637820/bin/bench-compare #!/bin/bash
set -eu
# Always operate from the repository root, regardless of invocation dir.
cd "$( dirname "${BASH_SOURCE[0]}" )/.."

# Compare benchmark results of the working tree against a baseline git ref
# using golang.org/x/tools/cmd/benchcmp.
main() {
    local python="$(which python)"
    local base="master"
    # Options: -python PATH, -base REF; everything after `--` is forwarded
    # to the benchmarks runner.
    while [[ $# -gt 0 ]] ; do
        case $1 in
        -h*|--help)
            echo "usage: $(basename $0) [-python $python] [-base master] [-- benchmarks runner args]" >&2
            exit 0
            ;;
        -base)
            shift
            base="$1"
            ;;
        -python)
            shift
            python="$1"
            if [[ -d "$python" ]] ; then
                echo ". assume $python is virtualenv" >&2
                python="$python/bin/python"
            fi
            if [[ ! -x "$python" ]] ; then
                echo "error: invalid python path: $python" >&2
                exit 1
            fi
            ;;
        --)
            shift
            break
            ;;
        esac
        shift
    done
    local benchmark_args="$@"
    local run_benchmarks="$python -m benchmarks $benchmark_args"
    local bnames=""
    # Ask the runner for the benchmark names; abort if collection fails.
    if ! bnames=$($run_benchmarks -collect) ; then
        echo "$bnames" >&2
        exit 1
    fi

    echo "- run benchmarks for new code" >&2
    # One runner invocation per benchmark; `\$` anchors the filter regex.
    for n in $bnames ; do $run_benchmarks -filter "$n\$" ; done >.bench-new.txt

    # Stash uncommitted changes so the baseline checkout is clean.
    local stashed=0
    if [[ -n "$(git status --short -uall)" ]] ; then
        echo ". git tree is not clean, will stash." >&2
        echo ". If stash is not working for you, commit changes first." >&2
        git stash save --include-untracked
        stashed=1
        echo "" >&2
    fi

    echo "- checkout baseline code" >&2
    git checkout "$base" -- eventlet/
    echo "- run benchmarks for baseline code" >&2
    for n in $bnames ; do $run_benchmarks -filter "$n\$" ; done >.bench-base.txt

    echo "- return new code back" >&2
    git checkout .
    [[ "$stashed" -eq 1 ]] && git stash pop

    echo "- benchcmp" >&2
    # Install benchcmp via `go get` if it is missing.
    if ! which benchcmp &>/dev/null ; then
        echo "! benchcmp is not installed, trying go get" >&2
        if ! which go &>/dev/null ; then
            echo "! go is not installed https://golang.org/doc/install" >&2
            exit 1
        fi
        go get golang.org/x/tools/cmd/benchcmp
    fi
    benchcmp .bench-{base,new}.txt
    rm .bench-{base,new}.txt
}
main "$@"
07070100000019000081A400000000000000000000000166E1887C000000B0000000000000000000000000000000000000003300000000eventlet-0.37.0+git.1726056572.8637820/codecov.yml codecov:
token: 2a926756-1923-42a1-89c7-97925ea0e17a
coverage:
precision: 0
round: down
status:
project:
default:
target: auto
threshold: 3%
0707010000001A000041ED00000000000000000000000266E1887C00000000000000000000000000000000000000000000002B00000000eventlet-0.37.0+git.1726056572.8637820/doc 0707010000001B000081A400000000000000000000000166E1887C00000121000000000000000000000000000000000000003C00000000eventlet-0.37.0+git.1726056572.8637820/doc/requirements.txt # The order of packages is significant, because pip processes them in the
# order of appearance. Changing the order has an impact on the overall
# integration process, which may cause wedges in the gate later.
sphinx>=2.0.0,!=2.1.0 # BSD
sphinxcontrib-apidoc>=0.2.0 # BSD
pyzmq>=25.0.0
0707010000001C000041ED00000000000000000000000266E1887C00000000000000000000000000000000000000000000003200000000eventlet-0.37.0+git.1726056572.8637820/doc/source 0707010000001D000041ED00000000000000000000000266E1887C00000000000000000000000000000000000000000000003D00000000eventlet-0.37.0+git.1726056572.8637820/doc/source/_templates 0707010000001E000081A400000000000000000000000166E1887C000001EE000000000000000000000000000000000000004900000000eventlet-0.37.0+git.1726056572.8637820/doc/source/_templates/layout.html {% extends "!layout.html" %}
{% block footer %}
{{ super() }}
{% endblock %}
0707010000001F000041ED00000000000000000000000266E1887C00000000000000000000000000000000000000000000003A00000000eventlet-0.37.0+git.1726056572.8637820/doc/source/asyncio 07070100000020000081A400000000000000000000000166E1887C00000860000000000000000000000000000000000000004600000000eventlet-0.37.0+git.1726056572.8637820/doc/source/asyncio/asyncio.rst .. _asyncio-index:
Asyncio in Eventlet
###################
Asyncio Compatibility
=====================
Compatibility between Asyncio and Eventlet has been recently introduced.
You may be interested by the state of the art of this compatibility and by
the potential limitations, so please take a look at
:ref:`asyncio-compatibility`.
Asyncio Hub & Functions
=======================
Discover the :mod:`Asyncio Hub `
You may also want to take a look to the
:mod:`Asyncio compatibility functions `.
Migrating from Eventlet to Asyncio
==================================
Why Migrating?
--------------
Eventlet is a broken and outdated technology.
Eventlet was created almost 20 years ago (See the :ref:`history` of Eventlet),
at a time when Python did not provide non-blocking features.
Time passed, and Python now provides asyncio.
In parallel of the evolution of Python, the maintenance of Eventlet was
discontinued during several versions of Python, increasing the gap between
the monkey patching of Eventlet and the recent implementation of Python.
This gap is now not recoverable. For this reason, we decided to officially
abandon the maintenance of Eventlet in an incremental way.
In a last effort, we want to lead Eventlet to a well deserved rest.
Our goal is to provide you a guide to migrate off of Eventlet and then
to properly retire Eventlet.
For more details about the reasons that motivated this effort, we invite
readers to see the discussions related to this scheduled abandonment:
https://review.opendev.org/c/openstack/governance/+/902585
Getting Started
---------------
Want to use Asyncio and Eventlet together or you simply want to migrate
off of Eventlet?
Follow the :ref:`official migration guide `.
We encourage readers to first look at the :ref:`glossary_guide` to
learn about the various terms that may be encountered during the migration.
Alternatives & Tips
-------------------
You want to refactor your code to replace Eventlet usages? See the proposed
alternatives and tips:
- :ref:`awaitlet_alternative`
- :ref:`manage-your-deprecations`
07070100000021000081A400000000000000000000000166E1887C000015EA000000000000000000000000000000000000004C00000000eventlet-0.37.0+git.1726056572.8637820/doc/source/asyncio/compatibility.rst .. _asyncio-compatibility:
Asyncio compatibility in eventlet
#################################
It should be possible to:
* Run eventlet and asyncio in the same thread.
* Allow asyncio and eventlet to interact: eventlet code can use asyncio-based libraries, asyncio-based code can get results out of eventlet.
If this works, it would allow migrating from eventlet to asyncio in a gradual manner both within and across projects:
1. Within an OpenStack library, code could be a mixture of asyncio and eventlet code.
This means migration doesn't have to be done in one step, neither in libraries nor in the applications that depend on them.
2. Even when an OpenStack library fully migrates to asyncio, it will still be usable by anything that is still running on eventlet.
Prior art
=========
* Gevent has a similar model to eventlet.
There exists an integration between gevent and asyncio that follows model proposed below: https://pypi.org/project/asyncio-gevent/
* Twisted can run on top of the asyncio event loop.
Separately, it includes utilities for mapping its `Deferred` objects (similar to a JavaScript Promise) to the async/await model introduced in newer versions in Python 3, and in the opposite direction it added support for turning async/await functions into `Deferred`s.
In an eventlet context, `GreenThread` would need a similar form of integration to Twisted's `Deferred`.
Part 1: Implementing asyncio/eventlet interoperability
======================================================
There are three different parts involved in integrating eventlet and asyncio for purposes
1. Create a hub that runs on asyncio
------------------------------------
Like many networking frameworks, eventlet has pluggable event loops, in this case called a "hub". Typically hubs wrap system APIs like `select()` and `epoll()`, but there also used to be a hub that ran on Twisted.
Creating a hub that runs on top of the asyncio event loop should be fairly straightforward.
Once this is done, eventlet and asyncio code can run in the same process and the same thread, but they would still have difficulties talking to each other.
This latter requirement requires additional work, as covered by the next two items.
2. Calling `async def` functions from eventlet
----------------------------------------------
The goal is to allow something like this:
.. code::
import aiohttp
from eventlet_asyncio import future_to_greenlet # hypothetical API
async def get_url_body(url):
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
return await response.text()
def eventlet_code():
green_thread = future_to_greenlet(get_url_body("https://example.com"))
return green_thread.wait()
The code would presumably be similar to https://github.com/gfmio/asyncio-gevent/blob/main/asyncio_gevent/future_to_greenlet.py
3. Calling eventlet code from asyncio
-------------------------------------
The goal is to allow something like this:
.. code::
from urllib.request import urlopen
from eventlet import spawn
from eventlet_asyncio import greenlet_to_future # hypothetical API
def get_url_body(url):
# Looks blocking, but actually isn't
return urlopen(url).read()
# This would likely be common pattern, so could be implemented as decorator...
async def asyncio_code():
greenlet = eventlet.spawn(get_url_body, "https://example.com")
future = greenlet_to_future(greenlet)
return await future
The code would presumably be similar to https://github.com/gfmio/asyncio-gevent/blob/main/asyncio_gevent/greenlet_to_future.py
4. Limitations and potential unexpected behavior
------------------------------------------------
``concurrent.futures.thread`` just uses normal threads, not Eventlet's special threads.
Similarly, `asyncio.to_thread() `_
specifically requires regular blocking code, it won't work correctly with Eventlet code.
Multiple readers are not supported by the Asyncio hub.
Part 2: How a port would work on a technical level
==================================================
Porting a library
=================
1. Usage of eventlet-based APIs would be replaced with usage of asyncio APIs.
For example, `urllib` or `requests` might be replaced with `aiohttp `_.
The interoperability above can be used to make sure this continues to work with eventlet-based APIs.
The `awesome-asyncio `_ GitHub repository proposes a curated list of awesome
Python asyncio frameworks, libraries, software and resources. Do not hesitate to take a look at it. You may find
candidates compatible with asyncio that can allow you to replace some of your actual underlying libraries.
2. Over time, APIs would need to be migrated to be `async` functions, but in the intermediate time frame a standard `def` can still be used, again using the interoperability layer above.
3. Eventually all "blocking" APIs will have been removed, at which point everything can be switched to `async def` and `await`, including the external API, and the library will no longer depend on eventlet.
Porting an application
======================
An application would need to install the asyncio hub before kicking off eventlet.
Beyond that porting would be the same as a library.
Once all libraries are purely asyncio-based, eventlet usage can be removed and an asyncio loop run instead.
07070100000022000041ED00000000000000000000000266E1887C00000000000000000000000000000000000000000000004000000000eventlet-0.37.0+git.1726056572.8637820/doc/source/asyncio/guide 07070100000023000081A400000000000000000000000166E1887C0000072A000000000000000000000000000000000000004D00000000eventlet-0.37.0+git.1726056572.8637820/doc/source/asyncio/guide/awaitlet.rst .. _awaitlet_alternative:
Awaitlet as an Alternative
==========================
Applications with several years of existence may have seen their code base
grow again and again; migrating this kind of existing code base toward
AsyncIO would be painful or even unrealistic. For most of these
applications, migrating to AsyncIO may mean a complete rewrite.
`Awaitlet `_ is an alternative
which allows you to migrate this kind of existing code base without getting
the headaches associated with migrating such deliverables.
Awaitlet allows existing programs written to use threads and blocking APIs to
be ported to asyncio, by replacing frontend and backend code with asyncio
compatible approaches, but allowing intermediary code to remain completely
unchanged, with no addition of ``async`` or ``await`` keywords throughout the
entire codebase needed. Its primary use is to support code that is
cross-compatible with asyncio and non-asyncio runtime environments.
Awaitlet is a direct extract of `SQLAlchemy `_’s
own asyncio mediation layer, with no dependencies on SQLAlchemy. This code has
been in widespread production use in thousands of environments for several
years.
.. warning::
Using Awaitlet requires using the Asyncio hub; see
:ref:`understanding_hubs`.
Here is an example of Awaitlet usage::
import asyncio
import awaitlet
def asyncio_sleep():
return awaitlet.awaitlet(asyncio.sleep(5, result='hello'))
print(asyncio.run(awaitlet.async_def(asyncio_sleep)))
We invite the reader to read the `Awaitlet synopsis
`_ to get a better
overview of the opportunities offered by this library.
07070100000024000081A400000000000000000000000166E1887C00000721000000000000000000000000000000000000005000000000eventlet-0.37.0+git.1726056572.8637820/doc/source/asyncio/guide/deprecation.rst .. _manage-your-deprecations:
Manage Your Deprecations
========================
Libraries or applications may have specific features who are strongly related
to Eventlet, like the ``heartbeat_in_pthread`` feature in
the OpenStack `oslo.messaging
`_
deliverable.
Migrating off of Eventlet would make these features obsolete. As this kind of
feature exposes configuration endpoints, it would have to be deprecated to
allow your users to update their config files accordingly. However, the
deprecation process would take several months or even numerous versions before
hoping to see these features removed. Hence blocking the migration.
The proposed solution is to mock these features with empty entrypoints
that will only raise deprecation warnings to inform your users that they have
to update their config files. After 1 or 2 new versions these empty mocks
could be safely removed without impacting anybody.
In other words, these features will remain in the code, but they will do
nothing. They will be empty features, allowing us to migrate properly.
For example, with the ``heartbeat_in_pthread`` feature, by using Asyncio
we wouldn't have to run heartbeats in a separate thread. This feature,
the RabbitMQ heartbeat, would be run in a coroutine — a coroutine that is
run in the main native thread. The config option will remain available but
it will only show a deprecation warning like the following one::
__main__:1: DeprecationWarning: Using heartbeat_in_pthread is
deprecated and will be removed in {SERIES}. Enabling that feature
have no functional effects due to recent changes applied in the
networking model used by oslo.messaging. Please plan an update of your
configuration.
07070100000025000081A400000000000000000000000166E1887C000014B3000000000000000000000000000000000000004D00000000eventlet-0.37.0+git.1726056572.8637820/doc/source/asyncio/guide/glossary.rst .. _glossary_guide:
Glossary
========
This glossary provides a brief description of some of the terms used within
Eventlet in general, and more specifically in the migration context.
The goal of this glossary is to ensure that everybody has the same
understanding of the used terms.
For more information about anything related to the migration, see the
:ref:`migration-guide`.
.. _glossary-concurrency:
Concurrency
-----------
**Concurrency** is when two or more tasks can start, run, and complete in
overlapping time **periods**. It doesn't necessarily mean they'll ever both be
running **at the same instant**. For example, _multitasking_ on a single-core
machine.
.. _glossary-cooperative-multitasking:
Cooperative Multitasking
------------------------
Whenever a **thread** begins sleeping or awaiting network I/O, there is a
chance for another thread to take the **GIL** and execute Python code.
This is **cooperative multitasking**.
.. _glossary-coro:
Coro
----
Using the name **coro** is a common convention in the Python API
documentation. It refers to a coroutine; i.e., strictly speaking, the result
of calling an async def function, and not the function itself.
.. _glossary-coroutine:
Coroutine
---------
**Coroutines** are program components that allow execution to be suspended
and resumed, generalizing subroutines. They have been described as "functions whose
execution you can pause".
.. _glossary-future:
Future
------
A **future** represents a future completion state of some activity and is
managed by the loop. A Future is a special low-level awaitable object that
represents an eventual result of an asynchronous operation.
.. _glossary-greenlet:
Greenlet
--------
A **greenlet** is a lightweight **coroutine** for in-process sequential
concurrent programming (see **concurrency**). You can usually think of
greenlets as cooperatively scheduled **threads**. The major differences are
that since they’re cooperatively scheduled, you are in control of when they
execute, and since they are **coroutines**, many greenlets can exist in a
single native **thread**.
Greenlets are cooperative (see **cooperative multitasking**) and sequential.
This means that when one greenlet is running, no other greenlet can be
running; the programmer is fully in control of when execution switches between
greenlets. In other words, when using greenlets, one should not expect
**preemptive** behavior.
Greenlet is also the name of a `library
`_ that provides the greenlet
mechanism. Eventlet is based on the greenlet library.
.. _glossary-green-thread:
Green Thread
------------
A **green thread** is a **thread** that is scheduled by a runtime library
or virtual machine (VM) instead of natively by the underlying operating system
(OS). Green threads emulate multithreaded environments without relying on any
native OS abilities, and they are managed in user space instead of kernel
space, enabling them to work in environments that do not have native thread
support.
.. _glossary-gil:
Global Interpreter Lock (GIL)
-----------------------------
A **global interpreter lock (GIL**) is a lock used internally to CPython to
ensure that only one **thread** runs in the Python VM at a time. In general,
Python offers to switch among threads only between bytecode instructions (see
**preemptive multitasking** and **cooperative multitasking**).
.. _glossary-parallelism:
Parallelism
-----------
**Parallelism** is when tasks _literally_ run at the same time, e.g., on a
multicore processor. A condition that arises when at least two threads are
executing simultaneously.
.. _glossary-preemptive:
Preemptive/Preemption
---------------------
**Preemption** is the act of temporarily interrupting an executing **task**,
with the intention of resuming it at a later time. This interrupt is done by
an external scheduler with no assistance or cooperation from the task.
.. _glossary-preemptive-multitasking:
Preemptive multitasking
-----------------------
**Preemptive multitasking** involves the use of an interrupt mechanism which
suspends the currently executing process and invokes a scheduler to determine
which process should execute next. Therefore, all processes will get some
amount of CPU time at any given time.
CPython also has _preemptive multitasking_: If a thread runs
uninterrupted for 1000 bytecode instructions in Python 2, or runs 15
milliseconds in Python 3, then it gives up the GIL and another thread may run.
.. _glossary-task:
Task
----
A **task** is a scheduled and independently managed **coroutine**. Tasks are
awaitable objects used to schedule coroutines concurrently.
.. _glossary-thread:
Thread
------
**Threads** are a way for a program to divide (termed "split") itself into two
or more simultaneously (or pseudo-simultaneously) running tasks. Threads and
processes differ from one operating system to another but, in general, a
thread is contained inside a process and different threads in the same process
share same resources while different processes in the same multitasking
operating system do not.
When do threads switch in Python? The switch depends on the context. The
threads may be interrupted (see **preemptive multitasking**) or behave
cooperatively (see **cooperative multitasking**).
07070100000026000081A400000000000000000000000166E1887C00001730000000000000000000000000000000000000004800000000eventlet-0.37.0+git.1726056572.8637820/doc/source/asyncio/migration.rst .. _migration-guide:
Migrating off of Eventlet
=========================
There are two main use cases for Eventlet:
1. As a required networking framework, much like one would use ``asyncio``,
``trio``, or older frameworks like ``Twisted`` and ``tornado``.
2. As an optional, pluggable backend that allows swapping out blocking APIs
for an event loop, transparently, without changing any code.
This is how Celery and Gunicorn use eventlet.
Pretending to look like a blocking API while actually using an event loop
underneath requires exact emulation of an ever-changing and ever-increasing
API footprint, which is fundamentally unsustainable for a volunteer-driven
open source project.
This is why Eventlet is discouraging new users.
**Most of this document will focus on the first use case: Eventlet as the sole
networking framework.**
For this use case, we recommend migrating to Python's ``asyncio``, and we are
providing infrastructure that will make this much easier, and allow for
*gradual* migration.
For the second use case, we believe this is a fundamentally unsustainable
approach and encourage the upstream frameworks to come up with different
solutions.
Step 1. Switch to the ``asyncio`` Hub
-------------------------------------
Eventlet has different pluggable networking event loops.
By switching the event loop to use ``asyncio``, you enable running ``asyncio``
and Eventlet code in the same thread in the same process.
To do so, set the ``EVENTLET_HUB`` environment variable to ``asyncio`` before
starting your Eventlet program.
For example, if you start your program with a shell script, you can do
``export EVENTLET_HUB=asyncio``.
Alternatively, you can explicitly specify the ``asyncio`` hub at startup,
before monkey patching or any other setup work::
import eventlet.hubs
eventlet.hubs.use_hub("eventlet.hubs.asyncio")
Step 2. Migrate code to ``asyncio``
-----------------------------------
Now that you're running Eventlet on top of ``asyncio``, you can use some new
APIs to call from Eventlet code into ``asyncio``, and vice-versa.
To call ``asyncio`` code from Eventlet code, you can wrap a coroutine (or
anything you can ``await``) into an Eventlet ``GreenThread``.
For example, if you want to make a HTTP request from Eventlet, you can use
the ``asyncio``-based ``aiohttp`` library::
import aiohttp
from eventlet.asyncio import spawn_for_awaitable
async def request():
async with aiohttp.ClientSession() as session:
url = "https://example.com"
async with session.get(url) as response:
html = await response.text()
return html
# This makes a coroutine; typically you'd ``await`` it:
coro = request()
# You can wrap this coroutine with an Eventlet GreenThread, similar to
# ``eventlet.spawn()``:
gthread = spawn_for_awaitable(request())
# And then get its result, the body of https://example.com:
result = gthread.wait()
In the other direction, any ``eventlet.greenthread.GreenThread`` can be
``await``-ed in ``async`` functions.
In other words ``async`` functions can call into Eventlet code::
def blocking_eventlet_api():
eventlet.sleep(1)
# do some other pseudo-blocking work
# ...
return 12
async def my_async_func():
gthread = eventlet.spawn(blocking_eventlet_api)
# In normal Eventlet code we'd call gthread.wait(), but since this is an
# async function we'll want to await instead:
result = await gthread
# result is now 12
# ...
Cancellation of ``asyncio.Future`` and killing of ``eventlet.GreenThread``
should propagate between the two.
Using these two APIs, with more to come, you can gradually migrate portions of
your application or library to ``asyncio``.
Calls to blocking APIs like ``urlopen()`` or ``requests.get()`` can get
replaced with calls to ``aiohttp``, for example.
Depending on your Eventlet usage, during your migration, you may have to
deprecate CLI options that are related to Eventlet, we invite the reader
to take a look to :ref:`manage-your-deprecations`.
The `awesome-asyncio `_ GitHub
repository proposes a curated list of awesome Python asyncio frameworks,
libraries, software and resources. Do not hesitate to take a look at it.
You may find candidates compatible with asyncio that can allow you to replace
some of your actual underlying libraries.
Step 3. Drop Eventlet altogether
--------------------------------
Eventually you won't be relying on Eventlet at all: all your code will be
``asyncio``-based.
At this point you can drop Eventlet and switch to running the ``asyncio``
loop directly.
Known limitations and work in progress
--------------------------------------
In general, ``async`` functions and Eventlet green threads are two separate
universes that just happen to be able to call each other.
In ``async`` functions:
* Eventlet thread locals probably won't work correctly.
* ``eventlet.greenthread.getcurrent()`` won't give the result you expect.
* ``eventlet`` locks and queues won't work if used directly.
* Eventlet multiple readers are not supported, so
``eventlet.debug.hub_prevent_multiple_readers`` does not work either.
In Eventlet greenlets:
* ``asyncio`` locks won't work if used directly.
We expect to add more migration and integration APIs over time as we learn
more about what works, common idioms, and requirements for migration.
You can track progress in the
`GitHub issue `_, and file
new issues if you have problems.
Alternatives
------------
If you really want to continue with Eventlet's pretend-to-be-blocking
approach, you can use `gevent `_.
But keep in mind that the same technical issues that make Eventlet maintenance
unsustainable over the long term also apply to Gevent.
07070100000027000081A400000000000000000000000166E1887C000001B0000000000000000000000000000000000000004600000000eventlet-0.37.0+git.1726056572.8637820/doc/source/asyncio/warning.rst .. warning::
Eventlet is now in maintenance mode, so only changes that fix bugs
or that are related to the new Asyncio hub will be accepted.
New features outside of the scope of the Asyncio hub won't be accepted.
:ref:`migration-guide` is strongly encouraged. We encourage existing
users to migrate to Asyncio. For further details see the official
:ref:`migration guide `.
07070100000028000081A400000000000000000000000166E1887C0000003A000000000000000000000000000000000000003E00000000eventlet-0.37.0+git.1726056572.8637820/doc/source/authors.rst .. _authors:
Authors
=======
.. include:: ../../AUTHORS
07070100000029000081A400000000000000000000000166E1887C000016C0000000000000000000000000000000000000004200000000eventlet-0.37.0+git.1726056572.8637820/doc/source/basic_usage.rst Basic Usage
=============
If it's your first time to Eventlet, you may find the illuminated examples in the :ref:`design-patterns` document to be a good starting point.
Eventlet is built around the concept of green threads (i.e. coroutines, we use the terms interchangeably) that are launched to do network-related work. Green threads differ from normal threads in two main ways:
* Green threads are so cheap they are nearly free. You do not have to conserve green threads like you would normal threads. In general, there will be at least one green thread per network connection.
* Green threads cooperatively yield to each other instead of preemptively being scheduled. The major advantage from this behavior is that shared data structures don't need locks, because only if a yield is explicitly called can another green thread have access to the data structure. It is also possible to inspect primitives such as queues to see if they have any pending data.
Primary API
===========
The design goal for Eventlet's API is simplicity and readability. You should be able to read its code and understand what it's doing. Fewer lines of code are preferred over excessively clever implementations. `Like Python itself `_, there should be one, and only one obvious way to do it in Eventlet!
Though Eventlet has many modules, much of the most-used stuff is accessible simply by doing ``import eventlet``. Here's a quick summary of the functionality available in the ``eventlet`` module, with links to more verbose documentation on each.
Greenthread Spawn
-----------------------
.. function:: eventlet.spawn(func, *args, **kw)
This launches a greenthread to call *func*. Spawning off multiple greenthreads gets work done in parallel. The return value from ``spawn`` is a :class:`greenthread.GreenThread` object, which can be used to retrieve the return value of *func*. See :func:`spawn ` for more details.
.. function:: eventlet.spawn_n(func, *args, **kw)
The same as :func:`spawn`, but it's not possible to know how the function terminated (i.e. no return value or exceptions). This makes execution faster. See :func:`spawn_n ` for more details.
.. function:: eventlet.spawn_after(seconds, func, *args, **kw)
Spawns *func* after *seconds* have elapsed; a delayed version of :func:`spawn`. To abort the spawn and prevent *func* from being called, call :meth:`GreenThread.cancel` on the return value of :func:`spawn_after`. See :func:`spawn_after ` for more details.
Greenthread Control
-----------------------
.. function:: eventlet.sleep(seconds=0)
Suspends the current greenthread and allows others a chance to process. See :func:`sleep ` for more details.
.. class:: eventlet.GreenPool
Pools control concurrency. It's very common in applications to want to consume only a finite amount of memory, or to restrict the amount of connections that one part of the code holds open so as to leave more for the rest, or to behave consistently in the face of unpredictable input data. GreenPools provide this control. See :class:`GreenPool ` for more on how to use these.
.. class:: eventlet.GreenPile
GreenPile objects represent chunks of work. In essence a GreenPile is an iterator that can be stuffed with work, and the results read out later. See :class:`GreenPile ` for more details.
.. class:: eventlet.Queue
Queues are a fundamental construct for communicating data between execution units. Eventlet's Queue class is used to communicate between greenthreads, and provides a bunch of useful features for doing that. See :class:`Queue ` for more details.
.. class:: eventlet.Timeout
This class is a way to add timeouts to anything. It raises *exception* in the current greenthread after *timeout* seconds. When *exception* is omitted or ``None``, the Timeout instance itself is raised.
Timeout objects are context managers, and so can be used in with statements.
See :class:`Timeout ` for more details.
Patching Functions
---------------------
.. function:: eventlet.import_patched(modulename, *additional_modules, **kw_additional_modules)
Imports a module in a way that ensures that the module uses "green" versions of the standard library modules, so that everything works nonblockingly. The only required argument is the name of the module to be imported. For more information see :ref:`import-green`.
.. function:: eventlet.monkey_patch(all=True, os=False, select=False, socket=False, thread=False, time=False)
Globally patches certain system modules to be greenthread-friendly. The keyword arguments afford some control over which modules are patched. If *all* is True, then all modules are patched regardless of the other arguments. If it's False, then the rest of the keyword arguments control patching of specific subsections of the standard library. Most patch the single module of the same name (os, time, select). The exceptions are socket, which also patches the ssl module if present; and thread, which patches thread, threading, and Queue. It's safe to call monkey_patch multiple times. For more information see :ref:`monkey-patch`.
Network Convenience Functions
------------------------------
.. autofunction:: eventlet.connect
.. autofunction:: eventlet.listen
.. autofunction:: eventlet.wrap_ssl
.. autofunction:: eventlet.serve
.. autoclass:: eventlet.StopServe
These are the basic primitives of Eventlet; there are a lot more out there in the other Eventlet modules; check out the :doc:`modules`.
0707010000002A000081A400000000000000000000000166E1887C0000008F000000000000000000000000000000000000003D00000000eventlet-0.37.0+git.1726056572.8637820/doc/source/common.txt .. |internal| replace::
This is considered an internal API, and it might change
unexpectedly without being deprecated first.
0707010000002B000081A400000000000000000000000166E1887C00001A4A000000000000000000000000000000000000003A00000000eventlet-0.37.0+git.1726056572.8637820/doc/source/conf.py #
# Eventlet documentation build configuration file, created by
# sphinx-quickstart on Sat Jul 4 19:48:27 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# One import per line, per PEP 8 (was "import sys, os").
import os
import sys

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath(".."))

# -- General configuration -----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.intersphinx',
    'sphinxcontrib.apidoc',
]

apidoc_module_dir = '../../eventlet'
apidoc_output_dir = 'reference/api'

# If this is True, '.. todo::' and '.. todolist::' produce output, else they produce
# nothing. The default is False.
todo_include_todos = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'Eventlet'
copyright = '2005-2024, Eventlet Contributors'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import eventlet

# The full version, including alpha/beta/rc tags.
release = eventlet.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# Intersphinx references.
# Use the versioned HTTPS URL so cross-references resolve against the
# Python 3 documentation (the old unversioned http:// URL is obsolete).
intersphinx_mapping = {'python': ('https://docs.python.org/3/', None)}

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# " v documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'Eventletdoc'

# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'Eventlet.tex', 'Eventlet Documentation',
     '', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True
0707010000002C000081A400000000000000000000000166E1887C00000450000000000000000000000000000000000000004100000000eventlet-0.37.0+git.1726056572.8637820/doc/source/contribute.rst .. _how-to-contribute:
How to Contribute to Eventlet
#############################
.. include:: asyncio/warning.rst
Contributions are welcome.
You want to report something? Read :ref:`report-a-bug`.
You want to propose changes? Read :ref:`propose-changes`.
.. _report-a-bug:
Report a Bug
=============
You find a bug and you want to report it?
You simply have to `create a new github issue `_
where you describe your problem.
Do not forget to provide technical details like:
* the hub you use
* the context of your bug
* the error message you get
* everything else that may help us to understand your problem.
The more details you give, the better we will be able to help you.
.. _propose-changes:
Propose Changes
===============
You may want to propose changes to fix a bug, improve the documentation, etc.
Feel free to open a pull request: https://github.com/eventlet/eventlet/pulls
.. include:: asyncio/warning.rst
We will be happy to review it.
At this point you may also be interested in :ref:`how to test Eventlet `.
0707010000002D000081A400000000000000000000000166E1887C0000193A000000000000000000000000000000000000004600000000eventlet-0.37.0+git.1726056572.8637820/doc/source/design_patterns.rst .. _design-patterns:
Design Patterns
=================
There are a bunch of basic patterns that Eventlet usage falls into. Here are a few examples that show their basic structure.
Client Pattern
--------------------
The canonical client-side example is a web crawler. This use case is given a list of urls and wants to retrieve their bodies for later processing. Here is a very simple example::
import eventlet
from eventlet.green.urllib.request import urlopen
urls = ["http://www.google.com/intl/en_ALL/images/logo.gif",
"https://www.python.org/static/img/python-logo.png",
"http://us.i1.yimg.com/us.yimg.com/i/ww/beta/y3.gif"]
def fetch(url):
return urlopen(url).read()
pool = eventlet.GreenPool()
for body in pool.imap(fetch, urls):
print("got body", len(body))
There is a slightly more complex version of this in the :ref:`web crawler example <web_crawler_example>`. Here's a tour of the interesting lines in this crawler.
``from eventlet.green... import urlopen`` is how you import a cooperatively-yielding version of urllib. It is the same in all respects to the standard version, except that it uses green sockets for its communication. This is an example of the :ref:`import-green` pattern.
``pool = eventlet.GreenPool()`` constructs a :class:`GreenPool ` of a thousand green threads. Using a pool is good practice because it provides an upper limit on the amount of work that this crawler will be doing simultaneously, which comes in handy when the input data changes dramatically.
``for body in pool.imap(fetch, urls):`` iterates over the results of calling the fetch function in parallel. :meth:`imap ` makes the function calls in parallel, and the results are returned in the order that they were executed.
The key aspect of the client pattern is that it involves collecting the results of each function call; the fact that each fetch is done concurrently is essentially an invisible optimization. Note also that imap is memory-bounded and won't consume gigabytes of memory if the list of urls grows to the tens of thousands (yes, we had that problem in production once!).
Server Pattern
--------------------
Here's a simple server-side example, a simple echo server::
import eventlet
def handle(client):
while True:
c = client.recv(1)
if not c: break
client.sendall(c)
server = eventlet.listen(('0.0.0.0', 6000))
pool = eventlet.GreenPool(10000)
while True:
new_sock, address = server.accept()
pool.spawn_n(handle, new_sock)
The file :ref:`echo server example <echo_server_example>` contains a somewhat more robust and complex version of this example.
``server = eventlet.listen(('0.0.0.0', 6000))`` uses a convenience function to create a listening socket.
``pool = eventlet.GreenPool(10000)`` creates a pool of green threads that could handle ten thousand clients.
``pool.spawn_n(handle, new_sock)`` launches a green thread to handle the new client. The accept loop doesn't care about the return value of the ``handle`` function, so it uses :meth:`spawn_n `, instead of :meth:`spawn `.
The difference between the server and the client patterns boils down to the fact that the server has a ``while`` loop calling ``accept()`` repeatedly, and that it hands off the client socket completely to the handle() method, rather than collecting the results.
Dispatch Pattern
-------------------
One common use case that Linden Lab runs into all the time is a "dispatch" design pattern. This is a server that is also a client of some other services. Proxies, aggregators, job workers, and so on are all terms that apply here. This is the use case that the :class:`GreenPile ` was designed for.
Here's a somewhat contrived example: a server that receives POSTs from clients that contain a list of urls of RSS feeds. The server fetches all the feeds concurrently and responds with a list of their titles to the client. It's easy to imagine it doing something more complex than this, and this could be easily modified to become a Reader-style application::
import eventlet
feedparser = eventlet.import_patched('feedparser')
pool = eventlet.GreenPool()
def fetch_title(url):
d = feedparser.parse(url)
return d.feed.get('title', '')
def app(environ, start_response):
pile = eventlet.GreenPile(pool)
for url in environ['wsgi.input'].readlines():
pile.spawn(fetch_title, url)
titles = '\n'.join(pile)
start_response('200 OK', [('Content-type', 'text/plain')])
return [titles]
The full version of this example is in the :ref:`feed_scraper_example`, which includes code to start the WSGI server on a particular port.
This example uses a global (gasp) :class:`GreenPool ` to control concurrency. If we didn't have a global limit on the number of outgoing requests, then a client could cause the server to open tens of thousands of concurrent connections to external servers, thereby getting feedscraper's IP banned, or various other accidental-or-on-purpose bad behavior. The pool isn't a complete DoS protection, but it's the bare minimum.
.. highlight:: python
:linenothreshold: 1
The interesting lines are in the app function::
pile = eventlet.GreenPile(pool)
for url in environ['wsgi.input'].readlines():
pile.spawn(fetch_title, url)
titles = '\n'.join(pile)
.. highlight:: python
:linenothreshold: 1000
Note that in line 1, the Pile is constructed using the global pool as its argument. That ties the Pile's concurrency to the global's. If there are already 1000 concurrent fetches from other clients of feedscraper, this one will block until some of those complete. Limitations are good!
Line 3 is just a spawn, but note that we don't store any return value from it. This is because the return value is kept in the Pile itself. This becomes evident in the next line...
Line 4 is where we use the fact that the Pile is an iterator. Each element in the iterator is one of the return values from the fetch_title function, which are strings. We can use a normal Python idiom (:func:`join`) to concatenate these incrementally as they happen.
0707010000002E000081A400000000000000000000000166E1887C000002E2000000000000000000000000000000000000004200000000eventlet-0.37.0+git.1726056572.8637820/doc/source/environment.rst .. _env_vars:
Environment Variables
======================
Eventlet's behavior can be controlled by a few environment variables.
These are only for the advanced user.
EVENTLET_HUB
Used to force Eventlet to use the specified hub instead of the
optimal one. See :ref:`understanding_hubs` for the list of
acceptable hubs and what they mean (note that picking a hub not on
the list will silently fail). Equivalent to calling
:meth:`eventlet.hubs.use_hub` at the beginning of the program.
EVENTLET_THREADPOOL_SIZE
The size of the threadpool in :mod:`~eventlet.tpool`. This is an
environment variable because tpool constructs its pool on first
use, so any control of the pool size needs to happen before then.
0707010000002F000081A400000000000000000000000166E1887C00000A01000000000000000000000000000000000000003F00000000eventlet-0.37.0+git.1726056572.8637820/doc/source/examples.rst Examples
========
Here are a bunch of small example programs that use Eventlet. All of these examples can be found in the ``examples`` directory of a source copy of Eventlet.
.. _web_crawler_example:
Web Crawler
------------
``examples/webcrawler.py``
.. literalinclude:: ../../examples/webcrawler.py
.. _wsgi_server_example:
WSGI Server
------------
``examples/wsgi.py``
.. literalinclude:: ../../examples/wsgi.py
.. _echo_server_example:
Echo Server
-----------
``examples/echoserver.py``
.. literalinclude:: ../../examples/echoserver.py
.. _socket_connect_example:
Socket Connect
--------------
``examples/connect.py``
.. literalinclude:: ../../examples/connect.py
.. _chat_server_example:
Multi-User Chat Server
-----------------------
``examples/chat_server.py``
This is a little different from the echo server, in that it broadcasts the
messages to all participants, not just the sender.
.. literalinclude:: ../../examples/chat_server.py
.. _feed_scraper_example:
Feed Scraper
-----------------------
``examples/feedscraper.py``
This example requires `Feedparser `_ to be installed or on the PYTHONPATH.
.. literalinclude:: ../../examples/feedscraper.py
.. _forwarder_example:
Port Forwarder
-----------------------
``examples/forwarder.py``
.. literalinclude:: ../../examples/forwarder.py
.. _recursive_crawler_example:
Recursive Web Crawler
-----------------------------------------
``examples/recursive_crawler.py``
This is an example recursive web crawler that fetches linked pages from a seed url.
.. literalinclude:: ../../examples/recursive_crawler.py
.. _producer_consumer_example:
Producer Consumer Web Crawler
-----------------------------------------
``examples/producer_consumer.py``
This is an example implementation of the producer/consumer pattern as well as being identical in functionality to the recursive web crawler.
.. literalinclude:: ../../examples/producer_consumer.py
.. _websocket_example:
Websocket Server Example
--------------------------
``examples/websocket.py``
This exercises some of the features of the websocket server
implementation.
.. literalinclude:: ../../examples/websocket.py
.. _websocket_chat_example:
Websocket Multi-User Chat Example
-----------------------------------
``examples/websocket_chat.py``
This is a mashup of the websocket example and the multi-user chat example, showing how you can do the same sorts of things with websockets that you can do with regular sockets.
.. literalinclude:: ../../examples/websocket_chat.py
07070100000030000081A400000000000000000000000166E1887C00000621000000000000000000000000000000000000003E00000000eventlet-0.37.0+git.1726056572.8637820/doc/source/history.rst .. _history:
History
-------
Eventlet began life as Donovan Preston was talking to Bob Ippolito about coroutine-based non-blocking networking frameworks in Python. Most non-blocking frameworks require you to run the "main loop" in order to perform all network operations, but Donovan wondered if a library written using a trampolining style could get away with transparently running the main loop any time i/o was required, stopping the main loop once no more i/o was scheduled. Bob spent a few days during PyCon 2006 writing a proof-of-concept. He named it eventlet, after the coroutine implementation it used, `greenlet `_. Donovan began using eventlet as a light-weight network library for his spare-time project `Pavel `_, and also began writing some unittests.
* http://svn.red-bean.com/bob/eventlet/trunk/
When Donovan started at Linden Lab in May of 2006, he added eventlet as an svn external in the ``indra/lib/python directory``, to be a dependency of the yet-to-be-named backbone project (at the time, it was named restserv). However, including eventlet as an svn external meant that any time the externally hosted project had hosting issues, Linden developers were not able to perform svn updates. Thus, the eventlet source was imported into the linden source tree at the same location, and became a fork.
Bob Ippolito has ceased working on eventlet and has stated his desire for Linden to take its fork forward to the open source world as "the" eventlet.
07070100000031000081A400000000000000000000000166E1887C00001084000000000000000000000000000000000000003B00000000eventlet-0.37.0+git.1726056572.8637820/doc/source/hubs.rst .. _understanding_hubs:
Understanding Eventlet Hubs
===========================
A hub forms the basis of Eventlet's event loop, which dispatches I/O events and schedules greenthreads. It is the existence of the hub that promotes coroutines (which can be tricky to program with) into greenthreads (which are easy).
Eventlet has multiple hub implementations, and when you start using it, it tries to select the best hub implementation for your system. The hubs that it supports are (in order of preference):
**asyncio**
| Asyncio based hub. Run Eventlet code in an Asyncio eventloop.
| By using this hub, Asyncio and Eventlet can be run in the same thread in the same process.
| We discourage starting new Eventlet projects.
| We encourage existing Eventlet projects to migrate from Eventlet to Asyncio.
| This hub allows you an incremental and smooth migration.
| See the :ref:`migration-guide` for further details.
| See the :ref:`asyncio-compatibility` for the current state of the art.
**epolls**
Linux. This is the fastest hub for Linux.
**kqueue**
FreeBSD and Mac OSX. Fastest hub for OS with kqueue.
**poll**
On platforms that support it.
**selects**
Lowest-common-denominator, available everywhere.
The only non-pure Python, pyevent hub (using libevent) was removed because it was not maintained. You are warmly welcome to contribute fast hub implementation using Cython, CFFI or other technology of your choice.
If the selected hub is not ideal for the application, another can be selected. You can make the selection either with the environment variable :ref:`EVENTLET_HUB <env_vars>`, or with :func:`eventlet.hubs.use_hub`.
.. function:: eventlet.hubs.use_hub(hub=None)
Use this to control which hub Eventlet selects. Call it with the name of the desired hub module. Make sure to do this before the application starts doing any I/O! Calling use_hub completely eliminates the old hub, and any file descriptors or timers that it had been managing will be forgotten. Put the call as one of the first lines in the main module.::
""" This is the main module """
import eventlet.hubs
eventlet.hubs.use_hub("eventlet.hubs.epolls")
Hubs are implemented as thread-local class instances. :func:`eventlet.hubs.use_hub` only operates on the current thread. When using multiple threads that each need their own hub, call :func:`eventlet.hubs.use_hub` at the beginning of each thread function that needs a specific hub. In practice, it may not be necessary to specify a hub in each thread; it works to use one special hub for the main thread, and let other threads use the default hub; this hybrid hub configuration will work fine.
It is also possible to use a third-party hub module in place of one of the built-in ones. Simply pass the module itself to :func:`eventlet.hubs.use_hub`. The task of writing such a hub is a little beyond the scope of this document, it's probably a good idea to simply inspect the code of the existing hubs to see how they work.::
import eventlet.hubs
import mypackage.myhub
eventlet.hubs.use_hub(mypackage.myhub)
Supplying None as the argument to :func:`eventlet.hubs.use_hub` causes it to select the default hub.
How the Hubs Work
-----------------
The hub has a main greenlet, MAINLOOP. When one of the running coroutines needs
to do some I/O, it registers a listener with the hub (so that the hub knows when to wake it up again), and then switches to MAINLOOP (via ``get_hub().switch()``). If there are other coroutines that are ready to run, MAINLOOP switches to them, and when they complete or need to do more I/O, they switch back to the MAINLOOP. In this manner, MAINLOOP ensures that every coroutine gets scheduled when it has some work to do.
MAINLOOP is launched only when the first I/O operation happens, and it is not the same greenlet that __main__ is running in. This lazy launching means that code can start using Eventlet without needing to be substantially restructured.
More Hub-Related Functions
--------------------------
.. autofunction:: eventlet.hubs.get_hub
.. autofunction:: eventlet.hubs.get_default_hub
.. autofunction:: eventlet.hubs.trampoline
07070100000032000041ED00000000000000000000000266E1887C00000000000000000000000000000000000000000000003900000000eventlet-0.37.0+git.1726056572.8637820/doc/source/images 07070100000033000081A400000000000000000000000166E1887C000083FA000000000000000000000000000000000000005400000000eventlet-0.37.0+git.1726056572.8637820/doc/source/images/threading_illustration.png PNG
IHDR f W HiCCPICC Profile xy8lcٍ%5k"dd1PlɖB T*P(7V֊"꾯\yy>|9s3 pS($8 B6;v£#`@ =)VVf_ ѫclTA `wcmB 4~tsIJT[k]o@
tv:y}I"@C U
8 f@@ ٥=)T
}\JG# PӁbct lҐ'ؾ[o;GYi+zO} F^DmJ
B/"_w큀&>@DN.F ʑUTCڗцɄCfIgc"9T8}q\#<";HwVa!'1}Zi]e2x,yBℒr*QSc_:z-4ÃFu&f~V7=ٰ㶗sut:|HKkѷn=6q>x~jDcJ$.~k
52D+T)L*\8w[$S2jɱ讘NQN88;ؗԜ\z.TLH7r5e](xwmphMIg}e7E*+ͫoQnT\-SzM)-f?Oj=z::_tJ~֭5;>t__ ;wkJrn1ykcrcIHSSOgXfff'%79?x&m`i4h1@XyZ%D2
E`@223̲lqZqP9q/yw0Q'ΈI4KJJNNG>P}rʚz}\8(Jf ghmb|SA9u+!^k y[u;m{#kG.N#$װn'<=}}moHD SVrP0pK㑽QN<C?'ؙڳqIAɉ)URc^fpfZ^8Uwqz6.G"WzF>j5Bₒ2=7g*TUztuXB]t=zFQM-u:6Sgupt}U*c+uǛk}!YK|81(qӋYY\Ϯ_t~XZxMs?+kn7=H Sp40, IDF.ۘ1Ln\V">vgl;
`.gn};ywE8YDP"?cb=-e;%ä5w f,ʎʽW(PLCRRVPaUV}NkH&,j<ٗ&Js@Pm}. ZX}3>);F1&&=If~YP-,Z
ְ<=ogk/dѡ1'#d
Wkѫngcs~DıݤJ_x0%3H'+5'X3x4$6T ^m8[xjA@'cc\b87|:dz:I)Χ^MIџ9{velל+yW[
\k)l)j*/i(mZɛ+U,nTԴӺG/hd!h{˳Ͽvtwپ*^z}owC#z|2L~&0
uGv
b[[ n :l/
M dx+|G\G#E.+! u5 P0CS
MF#T>&~&
%f-w)L8E%) kk/[*
{V]pq\TYnoQ7;r0b^Yޝ~fČcC5`AOP:AR4
< Z_qWqI)s=ik$/]q͝_)*/.zfbe-jZuGMyY=Wr>AѡcʓjSY܋߹VNV,[]Ɔ͝hn[!`hЀS7<qR yEI|QEuO-:=ŨTδ̬|ILhd|a5eɆa#g/r`co99q\B\<ܗxxwH Iħj.U$%wVH6J*##CkPXLDJjy{O4Q3IԢNչ[ף?o`$elfB51{b`)ley(Mm:`;88_8qU/ osDԱ=~\g(!#gQ'NE/j:s{"!lO6>TLYYK.?Α˽?_qV] ,\rV}uf]{cB[j=Ryt/Ļ{HKpd֤/җ+ūko76;