diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml
index 0fdfac4..a5f73ac 100644
--- a/.github/workflows/docker-publish.yml
+++ b/.github/workflows/docker-publish.yml
@@ -25,16 +25,6 @@ env:
 jobs:
   build:
     runs-on: ubuntu-latest
-    strategy:
-      fail-fast: true
-      matrix:
-        platform:
-          - linux/amd64
-          - linux/arm/v5
-          - linux/arm/v7
-          - linux/arm64/v8
-          - linux/386
-          - windows/amd64
     permissions:
       contents: read
       packages: write
@@ -46,14 +36,6 @@ jobs:
       - name: Checkout repository
        uses: actions/checkout@v3

-      # Install the cosign tool except on PR
-      # https://github.com/sigstore/cosign-installer
-      - name: Install cosign
-        if: github.event_name != 'pull_request'
-        uses: sigstore/cosign-installer@6e04d228eb30da1757ee4e1dd75a0ec73a653e06 #v3.1.1
-        with:
-          cosign-release: 'v2.1.1'
-
       # Set up BuildKit Docker container builder to be able to build
       # multi-platform images and export cache
       # https://github.com/docker/setup-buildx-action
@@ -90,19 +72,4 @@ jobs:
           labels: ${{ steps.meta.outputs.labels }}
           cache-from: type=gha
           cache-to: type=gha,mode=max
-          platforms: ${{ matrix.platform }}
-
-      # Sign the resulting Docker image digest except on PRs.
-      # This will only write to the public Rekor transparency log when the Docker
-      # repository is public to avoid leaking data. If you would like to publish
-      # transparency data even for private images, pass --force to cosign below.
-      # https://github.com/sigstore/cosign
-      - name: Sign the published Docker image
-        if: ${{ github.event_name != 'pull_request' }}
-        env:
-          # https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions#using-an-intermediate-environment-variable
-          TAGS: ${{ steps.meta.outputs.tags }}
-          DIGEST: ${{ steps.build-and-push.outputs.digest }}
-        # This step uses the identity token to provision an ephemeral certificate
-        # against the sigstore community Fulcio instance.
-        run: echo "${TAGS}" | xargs -I {} cosign sign --yes {}@${DIGEST}
+          platforms: linux/amd64,linux/arm64/v8,linux/386
diff --git a/Dockerfile b/Dockerfile
index 582bba6..f5f0609 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -17,8 +17,8 @@ COPY --from=generate-requirements /app/requirements.txt ./

 RUN ["pip", "install", "-r", "requirements.txt"]

-COPY src .
+COPY src/main.py .

-ENTRYPOINT [ "uvicorn", "src.main:app", "--host", "0.0.0.0", "--port", "8000", "--proxy-headers" ]
+ENTRYPOINT [ "uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000", "--proxy-headers" ]

 EXPOSE 8000
\ No newline at end of file
diff --git a/src/main.py b/src/main.py
index be8da4b..5a7045a 100644
--- a/src/main.py
+++ b/src/main.py
@@ -125,8 +125,9 @@ async def url_worker():
                     await session.execute(update(URL).where(URL.id == next_job.url.id).values(last_seen=saved_dt))
                     await session.execute(update(Job).where(Job.id == next_job.id).values(completed=saved_dt))
                 break
-            except Exception as e:
-                raise e
+            except Exception:
+                pass
+            await asyncio.sleep(10)
         else:  # Ran out of retries, try again
             async with session.begin():
                 if next_job.retry < 4:
@@ -140,15 +141,13 @@ async def repeat_url_worker():
     created_at: datetime.datetime = None
     while True:
         curtime = datetime.datetime.now(tz=datetime.timezone.utc)
-        async with async_session() as session:
+        async with async_session() as session, session.begin():
             stmt = select(RepeatURL).where(RepeatURL.active_since <= curtime).order_by(RepeatURL.created_at)
-            async with session.begin():
-                result = await session.scalars(stmt)
-                jobs = result.all()
+            result = await session.scalars(stmt)
+            jobs = result.all()
             stmt2 = select(URL.url).join(Job).where(URL.url.in_([job.url.url for job in jobs]) & (Job.completed == None) & (Job.failed == None))
-            async with session.begin():
-                result = await session.scalars(stmt2)
-                existing_jobs = result.all()
+            result = await session.scalars(stmt2)
+            existing_jobs = result.all()
             queued: list[Job] = []
             for job in jobs:
                 if (not job.url.last_seen or job.url.last_seen + datetime.timedelta(seconds=job.interval) < curtime) and job.url.url not in existing_jobs:  # Job can be re-queued
@@ -156,10 +155,10 @@ async def repeat_url_worker():
                         # batch = await Batch.objects.create()
                         batch = Batch()
                         created_at = curtime
+                        session.add(batch)
                     queued.append(Job(url=job.url, priority=10, batches=[batch, job.batch]))

             if queued:
-                async with session.begin():
-                    session.add_all(queued)
+                session.add_all(queued)
         await asyncio.sleep(60)