chore: reformat code and delete assets
- app.py +9 -14
- assets/pipeline.png +0 -0 (binary, deleted)

app.py CHANGED

@@ -1,12 +1,10 @@
 import streamlit as st
 import tensorflow as tf
 import numpy as np
-import matplotlib.pyplot as plt
 
 # Setting random seed to obtain reproducible results.
 tf.random.set_seed(42)
 
-
 # Initialize global variables.
 AUTO = tf.data.AUTOTUNE
 BATCH_SIZE = 1
@@ -129,6 +127,7 @@ def map_fn(pose):
     )
     return (rays_flat, t_vals)
 
+
 def render_rgb_depth(model, rays_flat, t_vals, rand=True, train=True):
     """Generates the RGB image and depth map from model prediction.
 
@@ -182,7 +181,6 @@ def render_rgb_depth(model, rays_flat, t_vals, rand=True, train=True):
     depth_map = tf.reduce_sum(weights * t_vals[:, None, None], axis=-1)
     return (rgb, depth_map)
 
-nerf_loaded = tf.keras.models.load_model("nerf", compile=False)
 
 def get_translation_t(t):
     """Get the translation matrix for movement in t."""
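Note on the depth line in the hunk above: `tf.reduce_sum(weights * t_vals[:, None, None], axis=-1)` is the usual NeRF compositing step, which reads the depth of each pixel as the expected ray-termination distance under the compositing weights. A minimal sketch with stand-in shapes (the batch/height/width/sample sizes below are illustrative, not values from this app):

```python
import tensorflow as tf

# Stand-in compositing weights, shape (batch, H, W, num_samples); in the app
# these come from alpha-compositing the densities predicted by the NeRF model.
weights = tf.random.uniform((1, 8, 8, 32))

# Distances of the samples along each ray, shape (batch, num_samples).
t_vals = tf.linspace(2.0, 6.0, 32)[None, ...]

# Expected termination distance per pixel: sum_i(w_i * t_i).
# t_vals[:, None, None] broadcasts to (batch, 1, 1, num_samples).
depth_map = tf.reduce_sum(weights * t_vals[:, None, None], axis=-1)
print(depth_map.shape)  # (1, 8, 8)
```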
@@ -243,15 +241,20 @@ def show_rendered_image(r,theta,phi):
     )
     return(rgb[0], depth[0])
 
+
 # app.py text matter starts here
 st.title('NeRF:3D volumetric rendering with NeRF')
 st.markdown("Authors: [Aritra Roy Gosthipathy](https://twitter.com/ariG23498) and [Ritwik Raha](https://twitter.com/ritwik_raha)")
 st.markdown("## Description")
 st.markdown("[NeRF](https://arxiv.org/abs/2003.08934) proposes an ingenious way to synthesize novel views of a scene by modelling the volumetric scene function through a neural network.")
 st.markdown("## Interactive Demo")
+
+# load the pre-trained model
+nerf_loaded = tf.keras.models.load_model("nerf", compile=False)
+
 # set the values of r theta phi
 r = 4.0
-theta = st.slider(
+theta = st.slider("Enter a value for Θ:", min_value=0.0, max_value=360.0)
 phi = -30.0
 color, depth = show_rendered_image(r, theta, phi)
 
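This hunk re-adds the `load_model` call that the earlier hunk removed from module scope, placing it in the demo section next to the slider that drives the render. Because Streamlit re-executes the whole script on every widget interaction, the model is still deserialized on each rerun. A possible refinement, not part of this commit and assuming a Streamlit version that provides `st.cache_resource`, would be to cache the loaded model:

```python
import streamlit as st
import tensorflow as tf

# Hypothetical caching wrapper (not in this commit): st.cache_resource runs
# the decorated function once per process and reuses the returned object on
# every rerun, so the "nerf" model is loaded from disk only once.
@st.cache_resource
def load_nerf_model():
    return tf.keras.models.load_model("nerf", compile=False)

nerf_loaded = load_nerf_model()
```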
@@ -259,13 +262,11 @@ col1, col2= st.columns(2)
 
 with col1:
     color = tf.keras.utils.array_to_img(color)
-
-    st.image(color, caption = "Color",clamp = True, width = 300)
-
+    st.image(color, caption="Color Image", clamp=True, width=300)
 
 with col2:
     depth = tf.keras.utils.array_to_img(depth[..., None])
-    st.image(depth, caption
+    st.image(depth, caption="Depth Map", clamp=True, width=300)
 
 st.markdown("## Tutorials")
 st.markdown("- [Keras](https://keras.io/examples/vision/nerf/)")
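On `depth[..., None]` in the hunk above: `tf.keras.utils.array_to_img` expects an array with a channel axis, while the rendered depth map is 2-D (height, width), so indexing with `None` appends a single-channel axis. A small sketch with a random stand-in depth map (the 64x64 size is illustrative):

```python
import numpy as np
import tensorflow as tf

# Stand-in (H, W) depth map; in the app this comes from show_rendered_image.
depth = np.random.rand(64, 64).astype("float32")

# array_to_img wants (H, W, C); depth[..., None] has shape (64, 64, 1) and is
# converted to a single-channel (grayscale) PIL image, scaled to 0-255.
img = tf.keras.utils.array_to_img(depth[..., None])
print(img.size)  # (64, 64)
```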
@@ -276,9 +277,3 @@ st.markdown("- [PyImageSearch NeRF 3](https://www.pyimagesearch.com/2021/11/24/c
 st.markdown("## Credits")
 st.markdown("- [PyImageSearch](https://www.pyimagesearch.com/)")
 st.markdown("- [JarvisLabs.ai GPU credits](https://jarvislabs.ai/)")
-
-
-
-
-
-
assets/pipeline.png DELETED
Binary file (333 kB)