redwood_dataset

Module providing dataset class for annotated Redwood dataset.

AnnotatedRedwoodDataset

Bases: Dataset

Dataset class for annotated Redwood dataset.

Data can be found here: http://redwood-data.org/3dscan/index.html

Annotations are part of the SDFEst repo.

Expected directory format

{root_dir}/{category_str}/rgbd/{sequence_id}/...
{ann_dir}/{sequence_id}.obj
{ann_dir}/annotations.json
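
A minimal usage sketch is shown below; the import path follows the source file location, and the root_dir / ann_dir values are placeholder paths, not paths shipped with the repository:

from sdfest.initialization.datasets.redwood_dataset import AnnotatedRedwoodDataset

# Keys omitted from the configuration fall back to AnnotatedRedwoodDataset.default_config.
dataset = AnnotatedRedwoodDataset(
    {
        "root_dir": "/path/to/redwood",     # contains {category_str}/rgbd/{sequence_id}/...
        "ann_dir": "/path/to/annotations",  # contains {sequence_id}.obj and annotations.json
    }
)

print(len(dataset))   # number of annotated frames
sample = dataset[0]   # dict with keys "color", "depth", "mask", "pointset", ...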

Source code in sdfest/initialization/datasets/redwood_dataset.py
class AnnotatedRedwoodDataset(torch.utils.data.Dataset):
    """Dataset class for annotated Redwood dataset.

    Data can be found here:
    http://redwood-data.org/3dscan/index.html

    Annotations are part of the SDFEst repo.

    Expected directory format:
        {root_dir}/{category_str}/rgbd/{sequence_id}/...
        {ann_dir}/{sequence_id}.obj
        {ann_dir}/annotations.json
    """

    num_categories = 3
    category_id_to_str = {
        0: "bottle",
        1: "bowl",
        2: "mug",
    }
    category_str_to_id = {v: k for k, v in category_id_to_str.items()}

    class Config(TypedDict, total=False):
        """Configuration dictionary for annoated Redwood dataset.

        Attributes:
            root_dir: See AnnotatedRedwoodDataset docstring.
            ann_dir: See AnnotatedRedwoodDataset docstring.
            mask_pointcloud: Whether the returned pointcloud will be masked.
            normalize_pointcloud:
                Whether the returned pointcloud and position will be normalized, such
                that pointcloud centroid is at the origin.
            scale_convention:
                Which scale is returned. The following strings are supported:
                    "diagonal":
                        Length of bounding box' diagonal. This is what NOCS uses.
                    "max": Maximum side length of bounding box.
                    "half_max": Half maximum side length of bounding box.
                    "full": Bounding box side lengths. Shape (3,).
            camera_convention:
                Which camera convention is used for position and orientation. One of:
                    "opengl": x right, y up, z back
                    "opencv": x right, y down, z forward
                Note that this does not influence how the dataset is processed, only the
                returned position and quaternion.
            orientation_repr:
                Which orientation representation is used. One of:
                    "quaternion"
                    "discretized"
            orientation_grid_resolution:
                Resolution of the orientation grid.
                Only used if orientation_repr is "discretized".
            remap_y_axis:
                If not None, the Redwood y-axis will be mapped to the provided axis.
                Resulting coordinate system will always be right-handed.
                This is typically the up-axis.
                Note that NOCS object models are NOT aligned the same as ShapeNetV2.
                To get ShapeNetV2 alignment: -y
                One of: "x", "y", "z", "-x", "-y", "-z"
            remap_x_axis:
                If not None, the original x-axis will be mapped to the provided axis.
                Resulting coordinate system will always be right-handed.
                Note that NOCS object models are NOT aligned the same as ShapeNetV2.
                To get ShapeNetV2 alignment: z
                One of: "x", "y", "z", "-x", "-y", "-z"
            category_str:
                If not None, only samples from the matching category will be returned.
                See AnnotatedRedwoodDataset.category_id_to_str for admissible category
                strings.
        """

        root_dir: str
        ann_dir: str
        split: str
        mask_pointcloud: bool
        normalize_pointcloud: bool
        scale_convention: str
        camera_convention: str
        orientation_repr: str
        orientation_grid_resolution: int
        remap_y_axis: Optional[str]
        remap_x_axis: Optional[str]
        category_str: Optional[str]

    default_config: Config = {
        "root_dir": None,
        "ann_dir": None,
        "mask_pointcloud": False,
        "normalize_pointcloud": False,
        "camera_convention": "opengl",
        "scale_convention": "half_max",
        "orientation_repr": "quaternion",
        "orientation_grid_resolution": None,
        "category_str": None,
        "remap_y_axis": None,
        "remap_x_axis": None,
    }

    def __init__(
        self,
        config: Config,
    ) -> None:
        """Initialize the dataset.

        Args:
            config:
                Configuration dictionary of dataset. Provided dictionary will be merged
                with default_config. See AnnotatedRedwoodDataset.Config for keys.
        """
        config = yoco.load_config(
            config, current_dict=AnnotatedRedwoodDataset.default_config
        )
        self._root_dir = config["root_dir"]
        self._ann_dir = config["ann_dir"]
        self._camera_convention = config["camera_convention"]
        self._mask_pointcloud = config["mask_pointcloud"]
        self._normalize_pointcloud = config["normalize_pointcloud"]
        self._scale_convention = config["scale_convention"]
        self._remap_y_axis = config["remap_y_axis"]
        self._remap_x_axis = config["remap_x_axis"]
        self._orientation_repr = config["orientation_repr"]
        if self._orientation_repr == "discretized":
            self._orientation_grid = so3grid.SO3Grid(
                config["orientation_grid_resolution"]
            )
        self._load_annotations()
        self._camera = Camera(width=640, height=480, fx=525, fy=525, cx=319.5, cy=239.5)

    def _load_annotations(self) -> None:
        """Load annotations into memory."""
        ann_json = os.path.join(self._ann_dir, "annotations.json")
        with open(ann_json, "r") as f:
            anns_dict = json.load(f)
        self._raw_samples = []
        for seq_id, seq_anns in anns_dict.items():
            for pose_ann in seq_anns["pose_anns"]:
                self._raw_samples.append(
                    self._create_raw_sample(seq_id, seq_anns, pose_ann)
                )

    def _create_raw_sample(
        self, seq_id: str, sequence_dict: dict, annotation_dict: dict
    ) -> dict:
        """Create raw sample from information in annotations file."""
        position = torch.tensor(annotation_dict["position"])
        orientation_q = torch.tensor(annotation_dict["orientation"])
        rgb_filename = annotation_dict["rgb_file"]
        depth_filename = annotation_dict["depth_file"]
        mesh_filename = sequence_dict["mesh"]
        mesh_path = os.path.join(self._ann_dir, mesh_filename)
        category_str = sequence_dict["category"]
        color_path = os.path.join(
            self._root_dir, category_str, "rgbd", seq_id, "rgb", rgb_filename
        )
        depth_path = os.path.join(
            self._root_dir, category_str, "rgbd", seq_id, "depth", depth_filename
        )
        extents = torch.tensor(sequence_dict["scale"]) * 2
        return {
            "position": position,
            "orientation_q": orientation_q,
            "extents": extents,
            "color_path": color_path,
            "depth_path": depth_path,
            "mesh_path": mesh_path,
            "category_str": category_str,
        }

    def __len__(self) -> int:
        """Return number of sample in dataset."""
        return len(self._raw_samples)

    def __getitem__(self, idx: int) -> dict:
        """Return a sample of the dataset.

        Args:
            idx: Index of the instance.
        Returns:
            Sample containing the following keys:
                "color"
                "depth"
                "mask"
                "pointset"
                "position"
                "orientation"
                "quaternion"
                "scale"
                "color_path"
                "obj_path"
                "category_id"
                "category_str"
        """
        raw_sample = self._raw_samples[idx]
        color = torch.from_numpy(
            np.asarray(Image.open(raw_sample["color_path"]), dtype=np.float32) / 255
        )
        depth = self._load_depth(raw_sample["depth_path"])
        instance_mask = self._compute_mask(depth, raw_sample)

        pointcloud_mask = instance_mask if self._mask_pointcloud else None
        pointcloud = pointset_utils.depth_to_pointcloud(
            depth,
            self._camera,
            mask=pointcloud_mask,
            convention=self._camera_convention,
        )

        # adjust camera convention for position, orientation and scale
        position = pointset_utils.change_position_camera_convention(
            raw_sample["position"], "opencv", self._camera_convention
        )

        # orientation / scale
        orientation_q, extents = self._change_axis_convention(
            raw_sample["orientation_q"], raw_sample["extents"]
        )
        orientation_q = pointset_utils.change_orientation_camera_convention(
            orientation_q, "opencv", self._camera_convention
        )
        orientation = self._quat_to_orientation_repr(orientation_q)
        scale = self._get_scale(extents)

        # normalize pointcloud & position
        if self._normalize_pointcloud:
            pointcloud, centroid = pointset_utils.normalize_points(pointcloud)
            position = position - centroid

        category_str = raw_sample["category_str"]
        sample = {
            "color": color,
            "depth": depth,
            "pointset": pointcloud,
            "mask": instance_mask,
            "position": position,
            "orientation": orientation,
            "quaternion": orientation_q,
            "scale": scale,
            "color_path": raw_sample["color_path"],
            "obj_path": raw_sample["mesh_path"],
            "category_id": self.category_str_to_id[category_str],
            "category_str": category_str,
        }
        return sample

    def _compute_mask(self, depth: torch.Tensor, raw_sample: dict) -> torch.Tensor:
        mesh = synthetic.Mesh(
            path=raw_sample["mesh_path"],
            scale=1.0,  # do not resize mesh, as it is already at right size
            rel_scale=True,
            center=False,
        )
        mesh.position = raw_sample["position"]
        mesh.orientation = raw_sample["orientation_q"]
        gt_depth = torch.from_numpy(synthetic.draw_depth_geometry(mesh, self._camera))
        mask = gt_depth != 0
        # exclude occluded parts from mask
        mask[(depth != 0) * (depth < gt_depth - 0.01)] = 0
        return mask

    def _load_depth(self, depth_path: str) -> torch.Tensor:
        """Load depth from depth filepath."""
        depth = torch.from_numpy(
            np.asarray(Image.open(depth_path), dtype=np.float32) * 0.001
        )
        return depth

    def _get_scale(self, extents: torch.Tensor) -> float:
        """Return scale from stored sample data and extents."""
        if self._scale_convention == "diagonal":
            return torch.linalg.norm(extents)
        elif self._scale_convention == "max":
            return extents.max()
        elif self._scale_convention == "half_max":
            return 0.5 * extents.max()
        elif self._scale_convention == "full":
            return extents
        else:
            raise ValueError(
                f"Specified scale convention {self._scale_convention} not supported."
            )

    def _change_axis_convention(
        self, orientation_q: torch.Tensor, extents: torch.Tensor
    ) -> tuple:
        """Adjust up-axis for orientation and extents.

        Returns:
            Tuple of orientation_q and extents, with specified up-axis.
        """
        if self._remap_y_axis is None and self._remap_x_axis is None:
            return orientation_q, extents
        elif self._remap_y_axis is None or self._remap_x_axis is None:
            raise ValueError("Either both or none of remap_{y,x}_axis have to be None.")

        rotation_o2n = self._get_o2n_object_rotation_matrix()
        remapped_extents = torch.abs(torch.Tensor(rotation_o2n) @ extents)

        # quaternion so far: original -> camera
        # we want a quaternion: new -> camera
        rotation_n2o = rotation_o2n.T

        quaternion_n2o = torch.from_numpy(Rotation.from_matrix(rotation_n2o).as_quat())

        remapped_orientation_q = quaternion_utils.quaternion_multiply(
            orientation_q, quaternion_n2o
        )  # new -> original -> camera

        return remapped_orientation_q, remapped_extents

    def _get_o2n_object_rotation_matrix(self) -> np.ndarray:
        """Compute rotation matrix which rotates original to new object coordinates."""
        rotation_o2n = np.zeros((3, 3))  # original to new object convention
        if self._remap_y_axis == "x":
            rotation_o2n[0, 1] = 1
        elif self._remap_y_axis == "-x":
            rotation_o2n[0, 1] = -1
        elif self._remap_y_axis == "y":
            rotation_o2n[1, 1] = 1
        elif self._remap_y_axis == "-y":
            rotation_o2n[1, 1] = -1
        elif self._remap_y_axis == "z":
            rotation_o2n[2, 1] = 1
        elif self._remap_y_axis == "-z":
            rotation_o2n[2, 1] = -1
        else:
            raise ValueError("Unsupported remap_y_axis {self.remap_y}")

        if self._remap_x_axis == "x":
            rotation_o2n[0, 0] = 1
        elif self._remap_x_axis == "-x":
            rotation_o2n[0, 0] = -1
        elif self._remap_x_axis == "y":
            rotation_o2n[1, 0] = 1
        elif self._remap_x_axis == "-y":
            rotation_o2n[1, 0] = -1
        elif self._remap_x_axis == "z":
            rotation_o2n[2, 0] = 1
        elif self._remap_x_axis == "-z":
            rotation_o2n[2, 0] = -1
        else:
            raise ValueError("Unsupported remap_x_axis {self.remap_y}")

        # infer last column
        rotation_o2n[:, 2] = 1 - np.abs(np.sum(rotation_o2n, 1))  # rows must sum to +-1
        rotation_o2n[:, 2] *= np.linalg.det(rotation_o2n)  # make special orthogonal
        if np.linalg.det(rotation_o2n) != 1.0:  # check if special orthogonal
            raise ValueError("Unsupported combination of remap_{y,x}_axis. det != 1")
        return rotation_o2n

    def _quat_to_orientation_repr(self, quaternion: torch.Tensor) -> torch.Tensor:
        """Convert quaternion to selected orientation representation.

        Args:
            quaternion:
                The quaternion to convert, scalar-last, shape (4,).
        Returns:
            The same orientation as represented by the quaternion in the chosen
            orientation representation.
        """
        if self._orientation_repr == "quaternion":
            return quaternion
        elif self._orientation_repr == "discretized":
            index = self._orientation_grid.quat_to_index(quaternion.numpy())
            return torch.tensor(
                index,
                dtype=torch.long,
            )
        else:
            raise NotImplementedError(
                f"Orientation representation {self._orientation_repr} is not supported."
            )

    def load_mesh(self, object_path: str) -> o3d.geometry.TriangleMesh:
        """Load an object mesh and adjust its object frame convention."""
        mesh = o3d.io.read_triangle_mesh(object_path)
        if self._remap_y_axis is None and self._remap_x_axis is None:
            return mesh
        elif self._remap_y_axis is None or self._remap_x_axis is None:
            raise ValueError("Either both or none of remap_{y,x}_axis have to be None.")

        rotation_o2n = self._get_o2n_object_rotation_matrix()
        mesh.rotate(
            rotation_o2n,
            center=np.array([0.0, 0.0, 0.0])[:, None],
        )
        return mesh

Config

Bases: TypedDict

Configuration dictionary for annotated Redwood dataset.

Attributes:

root_dir (str): See AnnotatedRedwoodDataset docstring.

ann_dir (str): See AnnotatedRedwoodDataset docstring.

mask_pointcloud (bool): Whether the returned pointcloud will be masked.

normalize_pointcloud (bool): Whether the returned pointcloud and position will be normalized, such that the pointcloud centroid is at the origin.

scale_convention (str): Which scale is returned. The following strings are supported: "diagonal" (length of the bounding box diagonal; this is what NOCS uses), "max" (maximum side length of the bounding box), "half_max" (half the maximum side length of the bounding box), "full" (bounding box side lengths, shape (3,)).

camera_convention (str): Which camera convention is used for position and orientation. One of: "opengl" (x right, y up, z back), "opencv" (x right, y down, z forward). Note that this does not influence how the dataset is processed, only the returned position and quaternion.

orientation_repr (str): Which orientation representation is used. One of: "quaternion", "discretized".

orientation_grid_resolution (int): Resolution of the orientation grid. Only used if orientation_repr is "discretized".

remap_y_axis (Optional[str]): If not None, the Redwood y-axis will be mapped to the provided axis. The resulting coordinate system will always be right-handed. This is typically the up-axis. Note that NOCS object models are NOT aligned the same as ShapeNetV2. To get ShapeNetV2 alignment: -y. One of: "x", "y", "z", "-x", "-y", "-z".

remap_x_axis (Optional[str]): If not None, the original x-axis will be mapped to the provided axis. The resulting coordinate system will always be right-handed. Note that NOCS object models are NOT aligned the same as ShapeNetV2. To get ShapeNetV2 alignment: z. One of: "x", "y", "z", "-x", "-y", "-z".

category_str (Optional[str]): If not None, only samples from the matching category will be returned. See AnnotatedRedwoodDataset.category_id_to_str for admissible category strings.
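
As a sketch of a fuller configuration, the dictionary below requests NOCS-style scale, ShapeNetV2-aligned object frames (via the axis remapping noted above), and a discretized orientation representation; all paths are placeholders and the grid resolution is only an illustrative value:

config = {
    "root_dir": "/path/to/redwood",
    "ann_dir": "/path/to/annotations",
    "camera_convention": "opencv",
    "scale_convention": "diagonal",      # length of the bounding box diagonal (NOCS-style)
    "remap_y_axis": "-y",                # together with remap_x_axis = "z"
    "remap_x_axis": "z",                 # this yields ShapeNetV2 alignment
    "orientation_repr": "discretized",
    "orientation_grid_resolution": 2,    # required when orientation_repr is "discretized"
}
dataset = AnnotatedRedwoodDataset(config)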

Source code in sdfest/initialization/datasets/redwood_dataset.py
class Config(TypedDict, total=False):
    """Configuration dictionary for annoated Redwood dataset.

    Attributes:
        root_dir: See AnnotatedRedwoodDataset docstring.
        ann_dir: See AnnotatedRedwoodDataset docstring.
        mask_pointcloud: Whether the returned pointcloud will be masked.
        normalize_pointcloud:
            Whether the returned pointcloud and position will be normalized, such
            that pointcloud centroid is at the origin.
        scale_convention:
            Which scale is returned. The following strings are supported:
                "diagonal":
                    Length of bounding box' diagonal. This is what NOCS uses.
                "max": Maximum side length of bounding box.
                "half_max": Half maximum side length of bounding box.
                "full": Bounding box side lengths. Shape (3,).
        camera_convention:
            Which camera convention is used for position and orientation. One of:
                "opengl": x right, y up, z back
                "opencv": x right, y down, z forward
            Note that this does not influence how the dataset is processed, only the
            returned position and quaternion.
        orientation_repr:
            Which orientation representation is used. One of:
                "quaternion"
                "discretized"
        orientation_grid_resolution:
            Resolution of the orientation grid.
            Only used if orientation_repr is "discretized".
        remap_y_axis:
            If not None, the Redwood y-axis will be mapped to the provided axis.
            Resulting coordinate system will always be right-handed.
            This is typically the up-axis.
            Note that NOCS object models are NOT aligned the same as ShapeNetV2.
            To get ShapeNetV2 alignment: -y
            One of: "x", "y", "z", "-x", "-y", "-z"
        remap_x_axis:
            If not None, the original x-axis will be mapped to the provided axis.
            Resulting coordinate system will always be right-handed.
            Note that NOCS object models are NOT aligned the same as ShapeNetV2.
            To get ShapeNetV2 alignment: z
            One of: "x", "y", "z", "-x", "-y", "-z"
        category_str:
            If not None, only samples from the matching category will be returned.
            See AnnotatedRedwoodDataset.category_id_to_str for admissible category
            strings.
    """

    root_dir: str
    ann_dir: str
    split: str
    mask_pointcloud: bool
    normalize_pointcloud: bool
    scale_convention: str
    camera_convention: str
    orientation_repr: str
    orientation_grid_resolution: int
    remap_y_axis: Optional[str]
    remap_x_axis: Optional[str]
    category_str: Optional[str]

__getitem__(idx)

Return a sample of the dataset.

Parameters:

idx (int, required): Index of the instance.

Returns:

Sample containing the following keys: "color", "depth", "mask", "pointset", "position", "orientation", "quaternion", "scale", "color_path", "obj_path", "category_id", "category_str".
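
A short sketch of reading one sample; the tensor shapes noted in the comments are assumptions based on the 640x480 pinhole camera used by the dataset, not guarantees of the API:

sample = dataset[0]
color = sample["color"]            # RGB image scaled to [0, 1], assumed shape (480, 640, 3)
depth = sample["depth"]            # depth in meters, assumed shape (480, 640)
points = sample["pointset"]        # pointcloud backprojected from depth, assumed shape (N, 3)
quaternion = sample["quaternion"]  # scalar-last orientation quaternion, shape (4,)
scale = sample["scale"]            # scalar, or shape (3,) if scale_convention is "full"
print(sample["category_str"], sample["category_id"], sample["obj_path"])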

Source code in sdfest/initialization/datasets/redwood_dataset.py
def __getitem__(self, idx: int) -> dict:
    """Return a sample of the dataset.

    Args:
        idx: Index of the instance.
    Returns:
        Sample containing the following keys:
            "color"
            "depth"
            "mask"
            "pointset"
            "position"
            "orientation"
            "quaternion"
            "scale"
            "color_path"
            "obj_path"
            "category_id"
            "category_str"
    """
    raw_sample = self._raw_samples[idx]
    color = torch.from_numpy(
        np.asarray(Image.open(raw_sample["color_path"]), dtype=np.float32) / 255
    )
    depth = self._load_depth(raw_sample["depth_path"])
    instance_mask = self._compute_mask(depth, raw_sample)

    pointcloud_mask = instance_mask if self._mask_pointcloud else None
    pointcloud = pointset_utils.depth_to_pointcloud(
        depth,
        self._camera,
        mask=pointcloud_mask,
        convention=self._camera_convention,
    )

    # adjust camera convention for position, orientation and scale
    position = pointset_utils.change_position_camera_convention(
        raw_sample["position"], "opencv", self._camera_convention
    )

    # orientation / scale
    orientation_q, extents = self._change_axis_convention(
        raw_sample["orientation_q"], raw_sample["extents"]
    )
    orientation_q = pointset_utils.change_orientation_camera_convention(
        orientation_q, "opencv", self._camera_convention
    )
    orientation = self._quat_to_orientation_repr(orientation_q)
    scale = self._get_scale(extents)

    # normalize pointcloud & position
    if self._normalize_pointcloud:
        pointcloud, centroid = pointset_utils.normalize_points(pointcloud)
        position = position - centroid

    category_str = raw_sample["category_str"]
    sample = {
        "color": color,
        "depth": depth,
        "pointset": pointcloud,
        "mask": instance_mask,
        "position": position,
        "orientation": orientation,
        "quaternion": orientation_q,
        "scale": scale,
        "color_path": raw_sample["color_path"],
        "obj_path": raw_sample["mesh_path"],
        "category_id": self.category_str_to_id[category_str],
        "category_str": category_str,
    }
    return sample

__init__(config)

Initialize the dataset.

Parameters:

config (Config, required): Configuration dictionary of the dataset. The provided dictionary will be merged with default_config. See AnnotatedRedwoodDataset.Config for keys.
Source code in sdfest/initialization/datasets/redwood_dataset.py
def __init__(
    self,
    config: Config,
) -> None:
    """Initialize the dataset.

    Args:
        config:
            Configuration dictionary of dataset. Provided dictionary will be merged
            with default_config. See AnnotatedRedwoodDataset.Config for keys.
    """
    config = yoco.load_config(
        config, current_dict=AnnotatedRedwoodDataset.default_config
    )
    self._root_dir = config["root_dir"]
    self._ann_dir = config["ann_dir"]
    self._camera_convention = config["camera_convention"]
    self._mask_pointcloud = config["mask_pointcloud"]
    self._normalize_pointcloud = config["normalize_pointcloud"]
    self._scale_convention = config["scale_convention"]
    self._remap_y_axis = config["remap_y_axis"]
    self._remap_x_axis = config["remap_x_axis"]
    self._orientation_repr = config["orientation_repr"]
    if self._orientation_repr == "discretized":
        self._orientation_grid = so3grid.SO3Grid(
            config["orientation_grid_resolution"]
        )
    self._load_annotations()
    self._camera = Camera(width=640, height=480, fx=525, fy=525, cx=319.5, cy=239.5)

__len__()

Return number of samples in the dataset.

Source code in sdfest/initialization/datasets/redwood_dataset.py
def __len__(self) -> int:
    """Return number of sample in dataset."""
    return len(self._raw_samples)

load_mesh(object_path)

Load an object mesh and adjust its object frame convention.

Source code in sdfest/initialization/datasets/redwood_dataset.py
def load_mesh(self, object_path: str) -> o3d.geometry.TriangleMesh:
    """Load an object mesh and adjust its object frame convention."""
    mesh = o3d.io.read_triangle_mesh(object_path)
    if self._remap_y_axis is None and self._remap_x_axis is None:
        return mesh
    elif self._remap_y_axis is None or self._remap_x_axis is None:
        raise ValueError("Either both or none of remap_{y,x}_axis have to be None.")

    rotation_o2n = self._get_o2n_object_rotation_matrix()
    mesh.rotate(
        rotation_o2n,
        center=np.array([0.0, 0.0, 0.0])[:, None],
    )
    return mesh
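
A usage sketch combining load_mesh with a sample's obj_path; get_axis_aligned_bounding_box is a standard Open3D method, but the snippet as a whole is illustrative rather than part of the dataset API:

sample = dataset[0]
mesh = dataset.load_mesh(sample["obj_path"])  # mesh in the remapped object frame
bbox = mesh.get_axis_aligned_bounding_box()   # axis-aligned bounds of the annotated model
print(bbox.get_extent())                      # side lengths in meters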

ObjectError

Bases: Exception

Error raised if something is wrong with the mesh.

Source code in sdfest/initialization/datasets/redwood_dataset.py
class ObjectError(Exception):
    """Error if something with the mesh is wrong."""

    pass