
extract_net_tensor

extract_net_tensor(config_file, model_checkpoint, image_path, output_folder, net_layer_name, device='cuda:0')

Process an image with a PyTorch model, intercepting live tensor output from a chosen model layer.

Parameters:

Name              Type  Description                                                        Default
config_file       str   path to the model config file                                      required
model_checkpoint  str   PyTorch .pth checkpoint file                                       required
image_path        str   path to the input image                                            required
output_folder     str   folder where captured tensor output is saved; created if missing   required
net_layer_name    str   layer whose output is extracted; find the name with print(model)   required
device            str   device used for inference                                          'cuda:0'
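
A minimal usage sketch; the config, checkpoint, and image paths and the layer name "backbone.layer4" below are illustrative placeholders, not values taken from this project:

from src.stages.model.explore.extract_net_tensor import extract_net_tensor

# Inspect the model (e.g. via print(model)) beforehand to find the exact dotted layer name,
# then capture that layer's output for a single image:
extract_net_tensor(
    config_file="configs/my_model.py",            # placeholder path
    model_checkpoint="checkpoints/my_model.pth",  # placeholder path
    image_path="data/sample.jpg",                 # placeholder path
    output_folder="outputs/extracted_tensors",
    net_layer_name="backbone.layer4",             # placeholder layer name
    device="cuda:0",
)

The captured values are written to outputs/extracted_tensors/tensor_extracted_values.npy.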
Source code in src/stages/model/explore/extract_net_tensor.py
@logger.catch(reraise=True)
def extract_net_tensor(
    config_file: str,
    model_checkpoint: str,
    image_path: str,
    output_folder: str,
    net_layer_name: str,
    device: str = "cuda:0",
) -> None:
    """Process an image with pytorch model intercepting live tensor output from the model layers.

    Args:
        config_file:
            path to model config file
        model_checkpoint:
            pytorch `.pth` checkpoint file
        image_path:
            path to input image
        output_folder:
            captured tensor output will be saved to this folder.
            If it does not exist, it will be created.
            indicates the layer from which the tensor output will be extracted.
            You can use print(model) for checking the layer name.
        device:
            devide used for inference
    """
    import numpy as np
    from src.stages.experiment.master_stage import _get_task

    # Drop any tensors captured by a previous call.
    tensor_bag.clear()

    # Resolve the task-specific init/inference functions and build the model.
    init_model, inference_model = load_functions(_get_task())

    model = init_model(config_file, model_checkpoint, device=device)
    # Look up the target layer by its dotted attribute path on the model.
    target_layer = attrgetter(net_layer_name)(model)

    # The forward hook captures the layer's output into tensor_bag during inference.
    target_layer.register_forward_hook(hook)
    inference_model(model, image_path)

    # Move the captured tensors off the GPU and save them as a single .npy file.
    cpu_tensor_bag = [detach_from_gpu(tensor) for tensor in tensor_bag]
    Path(output_folder).mkdir(parents=True, exist_ok=True)
    np.save(
        Path(output_folder) / "tensor_extracted_values.npy", np.array(cpu_tensor_bag)
    )
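
The function relies on module-level helpers (tensor_bag, hook, detach_from_gpu, load_functions) defined elsewhere in extract_net_tensor.py and not shown above. A minimal sketch of the capture helpers, assuming the hook simply appends each layer output to tensor_bag and detach_from_gpu moves a tensor to CPU memory as a NumPy array; the actual implementations may differ:

import torch

# Module-level buffer holding the outputs intercepted by the forward hook.
tensor_bag: list = []


def hook(module: torch.nn.Module, inputs: tuple, output: torch.Tensor) -> None:
    """Forward hook: store the layer's live output for later saving."""
    tensor_bag.append(output)


def detach_from_gpu(tensor: torch.Tensor):
    """Detach a captured tensor from the graph and return it as a CPU NumPy array."""
    return tensor.detach().cpu().numpy()

The saved file can be read back with numpy.load(Path(output_folder) / "tensor_extracted_values.npy").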