author     Jen Herting <jen@herting.cc>  2024-10-18 14:55:27 -0400
committer  GitHub <noreply@github.com>   2024-10-18 20:55:27 +0200
commit     957c0cc9dad60b6e6a3ae3ace37c4513f800700c (patch)
tree       47af2f2ab998a0d46cd9b7b19b835c752e713286 /var
parent     99e4d6b4468f9ad52ed9aa41102316230854b5c8 (diff)
py-clip-anytorch: new package (#47050)
* py-clip-anytorch: new package

* py-clip-anytorch: ran black
  py-langchain-core: ran black
  py-pydantic: ran black
  py-dalle2-pytorch: ran black

* [py-clip-anytorch] fixed license (checked_by)

* Apply suggestion from Wouter on fixing CI

  Co-authored-by: Wouter Deconinck <wdconinc@gmail.com>

---------

Co-authored-by: Alex C Leute <acl2809@rit.edu>
Co-authored-by: Bernhard Kaindl <bernhardkaindl7@gmail.com>
Co-authored-by: Wouter Deconinck <wdconinc@gmail.com>
Diffstat (limited to 'var')
-rw-r--r--  var/spack/repos/builtin/packages/py-clip-anytorch/package.py  33
1 file changed, 33 insertions, 0 deletions
diff --git a/var/spack/repos/builtin/packages/py-clip-anytorch/package.py b/var/spack/repos/builtin/packages/py-clip-anytorch/package.py
new file mode 100644
index 0000000000..283adbfeea
--- /dev/null
+++ b/var/spack/repos/builtin/packages/py-clip-anytorch/package.py
@@ -0,0 +1,33 @@
+# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
+# Spack Project Developers. See the top-level COPYRIGHT file for details.
+#
+# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+
+
+from spack.package import *
+
+
+class PyClipAnytorch(PythonPackage):
+ """CLIP (Contrastive Language-Image Pre-Training) is a neural network
+ trained on a variety of (image, text) pairs. It can be instructed in
+ natural language to predict the most relevant text snippet, given an image,
+ without directly optimizing for the task, similarly to the zero-shot
+ capabilities of GPT-2 and 3. We found CLIP matches the performance of the
+ original ResNet50 on ImageNet "zero-shot" without using any of the original
+ 1.28M labeled examples, overcoming several major challenges in computer
+ vision."""
+
+ homepage = "https://github.com/rom1504/CLIP"
+ # PyPI source is missing requirements.txt
+ url = "https://github.com/rom1504/CLIP/archive/refs/tags/2.6.0.tar.gz"
+
+ license("MIT", checked_by="qwertos")
+
+ version("2.6.0", sha256="1ac1f6ca47dfb5d4e55be8f45cc2f3bdf6415b91973a04b4529e812a8ae29bea")
+
+ depends_on("py-setuptools", type="build")
+ depends_on("py-ftfy", type=("build", "run"))
+ depends_on("py-regex", type=("build", "run"))
+ depends_on("py-tqdm", type=("build", "run"))
+ depends_on("py-torch", type=("build", "run"))
+ depends_on("py-torchvision", type=("build", "run"))